diff --git a/integration/indexes_compat_test.go b/integration/indexes_compat_test.go index fd141abacc25..f6f54fbb5096 100644 --- a/integration/indexes_compat_test.go +++ b/integration/indexes_compat_test.go @@ -70,16 +70,21 @@ func TestIndexesCreate(t *testing.T) { resultType compatTestCaseResultType // defaults to nonEmptyResult skip string // optional, skip test with a specified reason }{ - "empty": { + "Empty": { models: []mongo.IndexModel{}, resultType: emptyResult, }, - "single-index": { + "SingleIndex": { models: []mongo.IndexModel{ {Keys: bson.D{{"v", -1}}}, }, }, - "duplicate_id": { + "SingleIndexMultiField": { + models: []mongo.IndexModel{ + {Keys: bson.D{{"foo", 1}, {"bar", -1}}}, + }, + }, + "DuplicateID": { models: []mongo.IndexModel{ { Keys: bson.D{{"_id", 1}}, // this index is already created by default @@ -87,17 +92,23 @@ func TestIndexesCreate(t *testing.T) { }, skip: "https://github.com/FerretDB/FerretDB/issues/2311", }, - "non-existent-field": { + "DescendingID": { + models: []mongo.IndexModel{ + {Keys: bson.D{{"_id", -1}}}, + }, + resultType: emptyResult, + }, + "NonExistentField": { models: []mongo.IndexModel{ {Keys: bson.D{{"field-does-not-exist", 1}}}, }, }, - "dot-notation": { + "DotNotation": { models: []mongo.IndexModel{ {Keys: bson.D{{"v.foo", 1}}}, }, }, - "dangerous-key": { + "DangerousKey": { models: []mongo.IndexModel{ { Keys: bson.D{ @@ -107,14 +118,14 @@ func TestIndexesCreate(t *testing.T) { }, }, }, - "same-key": { + "SameKey": { models: []mongo.IndexModel{ {Keys: bson.D{{"v", -1}, {"v", 1}}}, }, resultType: emptyResult, altErrorMsg: `Error in specification { v: -1, v: 1 }, the field "v" appears multiple times`, }, - "custom-name": { + "CustomName": { models: []mongo.IndexModel{ { Keys: bson.D{{"foo", 1}, {"bar", -1}}, @@ -123,20 +134,27 @@ func TestIndexesCreate(t *testing.T) { }, }, - "multi-direction-different-indexes": { + "MultiDirectionDifferentIndexes": { models: []mongo.IndexModel{ {Keys: bson.D{{"v", -1}}}, {Keys: bson.D{{"v", 1}}}, }, }, - "multi-order": { + "MultiOrder": { models: []mongo.IndexModel{ {Keys: bson.D{{"foo", -1}}}, {Keys: bson.D{{"v", 1}}}, {Keys: bson.D{{"bar", 1}}}, }, }, - "build-same-index": { + "MultiSameKeyUsed": { + models: []mongo.IndexModel{ + {Keys: bson.D{{"foo", 1}}}, + {Keys: bson.D{{"foo", 1}, {"v", 1}}}, + {Keys: bson.D{{"bar", 1}}}, + }, + }, + "BuildSameIndex": { models: []mongo.IndexModel{ {Keys: bson.D{{"v", 1}}}, {Keys: bson.D{{"v", 1}}}, @@ -144,7 +162,7 @@ func TestIndexesCreate(t *testing.T) { resultType: emptyResult, skip: "https://github.com/FerretDB/FerretDB/issues/2311", }, - "multi-with-invalid": { + "MultiWithInvalid": { models: []mongo.IndexModel{ { Keys: bson.D{{"foo", 1}, {"bar", 1}, {"v", -1}}, @@ -156,7 +174,7 @@ func TestIndexesCreate(t *testing.T) { resultType: emptyResult, altErrorMsg: `Error in specification { v: -1, v: 1 }, the field "v" appears multiple times`, }, - "same-key-different-names": { + "SameKeyDifferentNames": { models: []mongo.IndexModel{ { Keys: bson.D{{"v", -1}}, @@ -170,7 +188,7 @@ func TestIndexesCreate(t *testing.T) { resultType: emptyResult, altErrorMsg: "One of the specified indexes already exists with a different name", }, - "same-name-different-keys": { + "SameNameDifferentKeys": { models: []mongo.IndexModel{ { Keys: bson.D{{"foo", -1}}, @@ -385,3 +403,340 @@ func TestIndexesCreateRunCommand(t *testing.T) { }) } } + +func TestIndexesDrop(t *testing.T) { + setup.SkipForTigrisWithReason(t, "Indexes are not supported for Tigris") + + t.Parallel() + + for name, tc := range 
map[string]struct { //nolint:vet // for readability + dropIndexName string // name of a single index to drop + dropAll bool // set true for drop all indexes, if true dropIndexName must be empty. + resultType compatTestCaseResultType // defaults to nonEmptyResult + toCreate []mongo.IndexModel // optional, if not nil create indexes before dropping + }{ + "DropAllCommand": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"v", 1}}}, + {Keys: bson.D{{"foo", -1}}}, + {Keys: bson.D{{"bar", 1}}}, + {Keys: bson.D{{"pam.pam", -1}}}, + }, + dropAll: true, + }, + "ID": { + dropIndexName: "_id_", + resultType: emptyResult, + }, + "AscendingValue": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"v", 1}}}, + }, + dropIndexName: "v_1", + }, + "DescendingValue": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"v", -1}}}, + }, + dropIndexName: "v_-1", + }, + "AsteriskWithDropOne": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"v", -1}}}, + }, + dropIndexName: "*", + resultType: emptyResult, + }, + "NonExistent": { + dropIndexName: "nonexistent_1", + resultType: emptyResult, + }, + } { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Helper() + t.Parallel() + + if tc.dropAll { + require.Empty(t, tc.dropIndexName, "index name must be empty when dropping all indexes") + } + + // It's enough to use a single provider for drop indexes test as indexes work the same for different collections. + s := setup.SetupCompatWithOpts(t, &setup.SetupCompatOpts{ + Providers: []shareddata.Provider{shareddata.Composites}, + AddNonExistentCollection: true, + }) + ctx, targetCollections, compatCollections := s.Ctx, s.TargetCollections, s.CompatCollections + + var nonEmptyResults bool + for i := range targetCollections { + targetCollection := targetCollections[i] + compatCollection := compatCollections[i] + + t.Run(targetCollection.Name(), func(t *testing.T) { + t.Helper() + + if tc.toCreate != nil { + _, targetErr := targetCollection.Indexes().CreateMany(ctx, tc.toCreate) + _, compatErr := compatCollection.Indexes().CreateMany(ctx, tc.toCreate) + require.NoError(t, compatErr) + require.NoError(t, targetErr) + } + + var targetRes, compatRes bson.Raw + var targetErr, compatErr error + + if tc.dropAll { + targetRes, targetErr = targetCollection.Indexes().DropAll(ctx) + compatRes, compatErr = compatCollection.Indexes().DropAll(ctx) + } else { + targetRes, targetErr = targetCollection.Indexes().DropOne(ctx, tc.dropIndexName) + compatRes, compatErr = compatCollection.Indexes().DropOne(ctx, tc.dropIndexName) + } + + require.Equal(t, compatErr, targetErr) + require.Equal(t, compatRes, targetRes) + + if targetErr == nil { + nonEmptyResults = true + } + + // List indexes to see they are identical after drop. 
+ targetCur, targetErr := targetCollection.Indexes().List(ctx) + compatCur, compatErr := compatCollection.Indexes().List(ctx) + + require.NoError(t, compatErr) + require.Equal(t, compatErr, targetErr) + + targetIndexes := FetchAll(t, ctx, targetCur) + compatIndexes := FetchAll(t, ctx, compatCur) + + require.Equal(t, compatIndexes, targetIndexes) + }) + } + + switch tc.resultType { + case nonEmptyResult: + require.True(t, nonEmptyResults, "expected non-empty results (some documents should be modified)") + case emptyResult: + require.False(t, nonEmptyResults, "expected empty results (no documents should be modified)") + default: + t.Fatalf("unknown result type %v", tc.resultType) + } + }) + } +} + +func TestIndexesDropRunCommand(t *testing.T) { + setup.SkipForTigrisWithReason(t, "Indexes are not supported for Tigris") + + t.Parallel() + + for name, tc := range map[string]struct { //nolint:vet // for readability + toCreate []mongo.IndexModel // optional, if set, create the given indexes before drop is called + toDrop any // index to drop + resultType compatTestCaseResultType // defaults to nonEmptyResult + command bson.D // optional, if set it runs this command instead of dropping toDrop + altErrorMsg string // optional, alternative error message in case of error + skip string // optional, skip test with a specified reason + }{ + "InvalidType": { + toDrop: true, + resultType: emptyResult, + altErrorMsg: `BSON field 'dropIndexes.index' is the wrong type 'bool', expected types '[string, object]'`, + }, + "MultipleIndexesByName": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"v", -1}}}, + {Keys: bson.D{{"v", 1}, {"foo", 1}}}, + {Keys: bson.D{{"v.foo", -1}}}, + }, + toDrop: bson.A{"v_-1", "v_1_foo_1"}, + }, + "MultipleIndexesByKey": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"v", -1}}}, + {Keys: bson.D{{"v.foo", -1}}}, + }, + toDrop: bson.A{bson.D{{"v", -1}}, bson.D{{"v.foo", -1}}}, + resultType: emptyResult, + altErrorMsg: `BSON field 'dropIndexes.index' is the wrong type 'array', expected types '[string, object]'`, + }, + "NonExistentMultipleIndexes": { + toDrop: bson.A{"non-existent", "invalid"}, + resultType: emptyResult, + }, + "InvalidMultipleIndexType": { + toDrop: bson.A{1}, + resultType: emptyResult, + altErrorMsg: `BSON field 'dropIndexes.index' is the wrong type 'array', expected types '[string, object]'`, + }, + "DocumentIndex": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"v", -1}}}, + }, + toDrop: bson.D{{"v", -1}}, + }, + "InvalidDocumentIndex": { + toDrop: bson.D{{"invalid", "invalid"}}, + resultType: emptyResult, + skip: "https://github.com/FerretDB/FerretDB/issues/2311", + }, + "NonExistentKey": { + toDrop: bson.D{{"non-existent", 1}}, + resultType: emptyResult, + }, + "DocumentIndexID": { + toDrop: bson.D{{"_id", 1}}, + resultType: emptyResult, + }, + "DropAllExpression": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"v", -1}}}, + {Keys: bson.D{{"foo.bar", 1}}}, + {Keys: bson.D{{"foo", 1}, {"bar", 1}}}, + }, + toDrop: "*", + }, + "MissingIndexField": { + command: bson.D{ + {"dropIndexes", "collection"}, + }, + resultType: emptyResult, + }, + "NonExistentDescendingID": { + toDrop: bson.D{{"_id", -1}}, + resultType: emptyResult, + }, + "MultipleKeyIndex": { + toCreate: []mongo.IndexModel{ + {Keys: bson.D{{"_id", -1}, {"v", 1}}}, + }, + toDrop: bson.D{ + {"_id", -1}, + {"v", 1}, + }, + }, + "NonExistentMultipleKeyIndex": { + toDrop: bson.D{ + {"non-existent1", -1}, + {"non-existent2", -1}, + }, + resultType: emptyResult, + }, + } { + name, tc := name, tc + 
t.Run(name, func(t *testing.T) { + if tc.skip != "" { + t.Skip(tc.skip) + } + + t.Helper() + t.Parallel() + + if tc.command != nil { + require.Nil(t, tc.toDrop, "toDrop name must be nil when using command") + } + + // It's enough to use a single provider for drop indexes test as indexes work the same for different collections. + s := setup.SetupCompatWithOpts(t, &setup.SetupCompatOpts{ + Providers: []shareddata.Provider{shareddata.Composites}, + AddNonExistentCollection: true, + }) + ctx, targetCollections, compatCollections := s.Ctx, s.TargetCollections, s.CompatCollections + + var nonEmptyResults bool + for i := range targetCollections { + targetCollection := targetCollections[i] + compatCollection := compatCollections[i] + + t.Run(targetCollection.Name(), func(t *testing.T) { + t.Helper() + + if tc.toCreate != nil { + _, targetErr := targetCollection.Indexes().CreateMany(ctx, tc.toCreate) + _, compatErr := compatCollection.Indexes().CreateMany(ctx, tc.toCreate) + require.NoError(t, compatErr) + require.NoError(t, targetErr) + + // List indexes to see they are identical after creation. + targetCur, targetListErr := targetCollection.Indexes().List(ctx) + compatCur, compatListErr := compatCollection.Indexes().List(ctx) + + require.NoError(t, compatListErr) + require.NoError(t, targetListErr) + + targetList := FetchAll(t, ctx, targetCur) + compatList := FetchAll(t, ctx, compatCur) + + require.Equal(t, compatList, targetList) + } + + targetCommand := bson.D{ + {"dropIndexes", targetCollection.Name()}, + {"index", tc.toDrop}, + } + + compatCommand := bson.D{ + {"dropIndexes", compatCollection.Name()}, + {"index", tc.toDrop}, + } + + if tc.command != nil { + targetCommand = tc.command + compatCommand = tc.command + } + + var targetRes bson.D + targetErr := targetCollection.Database().RunCommand(ctx, targetCommand).Decode(&targetRes) + + var compatRes bson.D + compatErr := compatCollection.Database().RunCommand(ctx, compatCommand).Decode(&compatRes) + + if tc.resultType == emptyResult { + require.Nil(t, targetRes) + require.Nil(t, compatRes) + } + + if tc.altErrorMsg != "" { + AssertMatchesCommandError(t, compatErr, targetErr) + + var expectedErr mongo.CommandError + require.True(t, errors.As(compatErr, &expectedErr)) + expectedErr.Raw = nil + AssertEqualAltError(t, expectedErr, tc.altErrorMsg, targetErr) + } else { + require.Equal(t, compatErr, targetErr) + } + + require.Equal(t, compatRes, targetRes) + + if compatErr == nil { + nonEmptyResults = true + } + + // List indexes to see they are identical after deletion. 
+ targetCur, targetListErr := targetCollection.Indexes().List(ctx) + compatCur, compatListErr := compatCollection.Indexes().List(ctx) + + require.NoError(t, compatListErr) + assert.Equal(t, compatListErr, targetListErr) + + targetList := FetchAll(t, ctx, targetCur) + compatList := FetchAll(t, ctx, compatCur) + + assert.Equal(t, compatList, targetList) + }) + } + + switch tc.resultType { + case nonEmptyResult: + require.True(t, nonEmptyResults, "expected non-empty results (some indexes should be deleted)") + case emptyResult: + require.False(t, nonEmptyResults, "expected empty results (no indexes should be deleted)") + default: + t.Fatalf("unknown result type %v", tc.resultType) + } + }) + } +} diff --git a/internal/handlers/common/msg_listcommands.go b/internal/handlers/common/msg_listcommands.go index 8b5c2f01eff3..525a83b6d182 100644 --- a/internal/handlers/common/msg_listcommands.go +++ b/internal/handlers/common/msg_listcommands.go @@ -113,6 +113,10 @@ var Commands = map[string]command{ Help: "Drops production database.", Handler: handlers.Interface.MsgDropDatabase, }, + "dropIndexes": { + Help: "Drops indexes on a collection.", + Handler: handlers.Interface.MsgDropIndexes, + }, "explain": { Help: "Returns the execution plan.", Handler: handlers.Interface.MsgExplain, diff --git a/internal/handlers/commonerrors/error.go b/internal/handlers/commonerrors/error.go index cde41d3535b3..29ef2e31dcbe 100644 --- a/internal/handlers/commonerrors/error.go +++ b/internal/handlers/commonerrors/error.go @@ -47,6 +47,9 @@ const ( // ErrNamespaceNotFound indicates that a collection is not found. ErrNamespaceNotFound = ErrorCode(26) // NamespaceNotFound + // ErrIndexNotFound indicates that an index is not found for the given name. + ErrIndexNotFound = ErrorCode(27) // IndexNotFound + // ErrUnsuitableValueType indicates that field could not be created for given value. ErrUnsuitableValueType = ErrorCode(28) // UnsuitableValueType @@ -68,6 +71,9 @@ const ( // ErrCommandNotFound indicates unknown command input. ErrCommandNotFound = ErrorCode(59) // CommandNotFound + // ErrInvalidOptions indicates that the _id index cannot be deleted. + ErrInvalidOptions = ErrorCode(72) // InvalidOptions + // ErrCannotCreateIndex indicates that index creation process failed because some data are not valid.
ErrCannotCreateIndex = ErrorCode(67) // CannotCreateIndex diff --git a/internal/handlers/commonerrors/errorcode_string.go b/internal/handlers/commonerrors/errorcode_string.go index 3f0c01238dd8..cc6c73f3bbef 100644 --- a/internal/handlers/commonerrors/errorcode_string.go +++ b/internal/handlers/commonerrors/errorcode_string.go @@ -14,6 +14,7 @@ func _() { _ = x[ErrFailedToParse-9] _ = x[ErrTypeMismatch-14] _ = x[ErrNamespaceNotFound-26] + _ = x[ErrIndexNotFound-27] _ = x[ErrUnsuitableValueType-28] _ = x[ErrConflictingUpdateOperators-40] _ = x[ErrCursorNotFound-43] @@ -21,6 +22,7 @@ func _() { _ = x[ErrInvalidID-53] _ = x[ErrEmptyName-56] _ = x[ErrCommandNotFound-59] + _ = x[ErrInvalidOptions-72] _ = x[ErrCannotCreateIndex-67] _ = x[ErrInvalidNamespace-73] _ = x[ErrIndexOptionsConflict-85] @@ -64,7 +66,7 @@ func _() { _ = x[ErrDuplicateField-4822819] } -const _ErrorCode_name = "UnsetInternalErrorBadValueFailedToParseTypeMismatchNamespaceNotFoundUnsuitableValueTypeConflictingUpdateOperatorsCursorNotFoundNamespaceExistsInvalidIDEmptyNameCommandNotFoundCannotCreateIndexInvalidNamespaceIndexOptionsConflictIndexKeySpecsConflictOperationFailedDocumentValidationFailureNotImplementedMechanismUnavailableLocation11000Location15947Location15948Location15955Location15959Location15973Location15974Location15975Location15976Location15998Location16872Location17276Location28667Location28724Location31253Location31254Location40156Location40157Location40158Location40160Location40234Location40237Location40238Location40323Location40352Location40414Location40415Location50840Location51024Location51075Location51091Location51108Location4822819" +const _ErrorCode_name = "UnsetInternalErrorBadValueFailedToParseTypeMismatchNamespaceNotFoundIndexNotFoundUnsuitableValueTypeConflictingUpdateOperatorsCursorNotFoundNamespaceExistsInvalidIDEmptyNameCommandNotFoundCannotCreateIndexInvalidOptionsInvalidNamespaceIndexOptionsConflictIndexKeySpecsConflictOperationFailedDocumentValidationFailureNotImplementedMechanismUnavailableLocation11000Location15947Location15948Location15955Location15959Location15973Location15974Location15975Location15976Location15998Location16872Location17276Location28667Location28724Location31253Location31254Location40156Location40157Location40158Location40160Location40234Location40237Location40238Location40323Location40352Location40414Location40415Location50840Location51024Location51075Location51091Location51108Location4822819" var _ErrorCode_map = map[ErrorCode]string{ 0: _ErrorCode_name[0:5], @@ -73,54 +75,56 @@ var _ErrorCode_map = map[ErrorCode]string{ 9: _ErrorCode_name[26:39], 14: _ErrorCode_name[39:51], 26: _ErrorCode_name[51:68], - 28: _ErrorCode_name[68:87], - 40: _ErrorCode_name[87:113], - 43: _ErrorCode_name[113:127], - 48: _ErrorCode_name[127:142], - 53: _ErrorCode_name[142:151], - 56: _ErrorCode_name[151:160], - 59: _ErrorCode_name[160:175], - 67: _ErrorCode_name[175:192], - 73: _ErrorCode_name[192:208], - 85: _ErrorCode_name[208:228], - 86: _ErrorCode_name[228:249], - 96: _ErrorCode_name[249:264], - 121: _ErrorCode_name[264:289], - 238: _ErrorCode_name[289:303], - 334: _ErrorCode_name[303:323], - 11000: _ErrorCode_name[323:336], - 15947: _ErrorCode_name[336:349], - 15948: _ErrorCode_name[349:362], - 15955: _ErrorCode_name[362:375], - 15959: _ErrorCode_name[375:388], - 15973: _ErrorCode_name[388:401], - 15974: _ErrorCode_name[401:414], - 15975: _ErrorCode_name[414:427], - 15976: _ErrorCode_name[427:440], - 15998: _ErrorCode_name[440:453], - 16872: _ErrorCode_name[453:466], - 17276: 
_ErrorCode_name[466:479], - 28667: _ErrorCode_name[479:492], - 28724: _ErrorCode_name[492:505], - 31253: _ErrorCode_name[505:518], - 31254: _ErrorCode_name[518:531], - 40156: _ErrorCode_name[531:544], - 40157: _ErrorCode_name[544:557], - 40158: _ErrorCode_name[557:570], - 40160: _ErrorCode_name[570:583], - 40234: _ErrorCode_name[583:596], - 40237: _ErrorCode_name[596:609], - 40238: _ErrorCode_name[609:622], - 40323: _ErrorCode_name[622:635], - 40352: _ErrorCode_name[635:648], - 40414: _ErrorCode_name[648:661], - 40415: _ErrorCode_name[661:674], - 50840: _ErrorCode_name[674:687], - 51024: _ErrorCode_name[687:700], - 51075: _ErrorCode_name[700:713], - 51091: _ErrorCode_name[713:726], - 51108: _ErrorCode_name[726:739], - 4822819: _ErrorCode_name[739:754], + 27: _ErrorCode_name[68:81], + 28: _ErrorCode_name[81:100], + 40: _ErrorCode_name[100:126], + 43: _ErrorCode_name[126:140], + 48: _ErrorCode_name[140:155], + 53: _ErrorCode_name[155:164], + 56: _ErrorCode_name[164:173], + 59: _ErrorCode_name[173:188], + 67: _ErrorCode_name[188:205], + 72: _ErrorCode_name[205:219], + 73: _ErrorCode_name[219:235], + 85: _ErrorCode_name[235:255], + 86: _ErrorCode_name[255:276], + 96: _ErrorCode_name[276:291], + 121: _ErrorCode_name[291:316], + 238: _ErrorCode_name[316:330], + 334: _ErrorCode_name[330:350], + 11000: _ErrorCode_name[350:363], + 15947: _ErrorCode_name[363:376], + 15948: _ErrorCode_name[376:389], + 15955: _ErrorCode_name[389:402], + 15959: _ErrorCode_name[402:415], + 15973: _ErrorCode_name[415:428], + 15974: _ErrorCode_name[428:441], + 15975: _ErrorCode_name[441:454], + 15976: _ErrorCode_name[454:467], + 15998: _ErrorCode_name[467:480], + 16872: _ErrorCode_name[480:493], + 17276: _ErrorCode_name[493:506], + 28667: _ErrorCode_name[506:519], + 28724: _ErrorCode_name[519:532], + 31253: _ErrorCode_name[532:545], + 31254: _ErrorCode_name[545:558], + 40156: _ErrorCode_name[558:571], + 40157: _ErrorCode_name[571:584], + 40158: _ErrorCode_name[584:597], + 40160: _ErrorCode_name[597:610], + 40234: _ErrorCode_name[610:623], + 40237: _ErrorCode_name[623:636], + 40238: _ErrorCode_name[636:649], + 40323: _ErrorCode_name[649:662], + 40352: _ErrorCode_name[662:675], + 40414: _ErrorCode_name[675:688], + 40415: _ErrorCode_name[688:701], + 50840: _ErrorCode_name[701:714], + 51024: _ErrorCode_name[714:727], + 51075: _ErrorCode_name[727:740], + 51091: _ErrorCode_name[740:753], + 51108: _ErrorCode_name[753:766], + 4822819: _ErrorCode_name[766:781], } func (i ErrorCode) String() string { diff --git a/internal/handlers/dummy/msg_dropindexes.go b/internal/handlers/dummy/msg_dropindexes.go new file mode 100644 index 000000000000..469c25e202b4 --- /dev/null +++ b/internal/handlers/dummy/msg_dropindexes.go @@ -0,0 +1,27 @@ +// Copyright 2021 FerretDB Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dummy + +import ( + "context" + + "github.com/FerretDB/FerretDB/internal/util/must" + "github.com/FerretDB/FerretDB/internal/wire" +) + +// MsgDropIndexes implements HandlerInterface. 
+func (h *Handler) MsgDropIndexes(ctx context.Context, msg *wire.OpMsg) (*wire.OpMsg, error) { + return nil, notImplemented(must.NotFail(msg.Document()).Command()) +} diff --git a/internal/handlers/hana/msg_dropindexes.go b/internal/handlers/hana/msg_dropindexes.go new file mode 100644 index 000000000000..43652eacc8b6 --- /dev/null +++ b/internal/handlers/hana/msg_dropindexes.go @@ -0,0 +1,27 @@ +// Copyright 2021 FerretDB Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hana + +import ( + "context" + + "github.com/FerretDB/FerretDB/internal/util/must" + "github.com/FerretDB/FerretDB/internal/wire" +) + +// MsgDropIndexes implements HandlerInterface. +func (h *Handler) MsgDropIndexes(ctx context.Context, msg *wire.OpMsg) (*wire.OpMsg, error) { + return nil, notImplemented(must.NotFail(msg.Document()).Command()) +} diff --git a/internal/handlers/handlers.go b/internal/handlers/handlers.go index 4a0aecbf292a..004f0e69934c 100644 --- a/internal/handlers/handlers.go +++ b/internal/handlers/handlers.go @@ -87,6 +87,9 @@ type Interface interface { // MsgDrop drops the collection. MsgDrop(ctx context.Context, msg *wire.OpMsg) (*wire.OpMsg, error) + // MsgDropIndexes drops indexes on a collection. + MsgDropIndexes(ctx context.Context, msg *wire.OpMsg) (*wire.OpMsg, error) + // MsgDropDatabase drops production database. MsgDropDatabase(ctx context.Context, msg *wire.OpMsg) (*wire.OpMsg, error) diff --git a/internal/handlers/pg/msg_createindexes.go b/internal/handlers/pg/msg_createindexes.go index ff0ac47dad59..471030e23dd9 100644 --- a/internal/handlers/pg/msg_createindexes.go +++ b/internal/handlers/pg/msg_createindexes.go @@ -174,6 +174,22 @@ func processIndexOptions(indexDoc *types.Document) (*pgdb.Index, error) { ) } + // Special case: if keyDocs consists of a {"_id": -1} only, an error should be returned. + if keyDoc.Len() == 1 { + var val any + var order int64 + + if val, err = keyDoc.Get("_id"); err == nil { + if order, err = common.GetWholeNumberParam(val); err == nil && order == -1 { + return nil, commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrBadValue, + "The field 'key' for an _id index must be {_id: 1}, but got { _id: -1 }", + "createIndexes", + ) + } + } + } + index.Key, err = processIndexKey(keyDoc) if err != nil { return nil, err diff --git a/internal/handlers/pg/msg_dropindexes.go b/internal/handlers/pg/msg_dropindexes.go new file mode 100644 index 000000000000..e5ecf2ade4db --- /dev/null +++ b/internal/handlers/pg/msg_dropindexes.go @@ -0,0 +1,236 @@ +// Copyright 2021 FerretDB Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pg + +import ( + "context" + "errors" + "fmt" + + "github.com/jackc/pgx/v4" + + "github.com/FerretDB/FerretDB/internal/handlers/common" + "github.com/FerretDB/FerretDB/internal/handlers/commonerrors" + "github.com/FerretDB/FerretDB/internal/handlers/pg/pgdb" + "github.com/FerretDB/FerretDB/internal/handlers/pg/pjson" + "github.com/FerretDB/FerretDB/internal/types" + "github.com/FerretDB/FerretDB/internal/util/iterator" + "github.com/FerretDB/FerretDB/internal/util/lazyerrors" + "github.com/FerretDB/FerretDB/internal/util/must" + "github.com/FerretDB/FerretDB/internal/wire" +) + +// MsgDropIndexes implements HandlerInterface. +func (h *Handler) MsgDropIndexes(ctx context.Context, msg *wire.OpMsg) (*wire.OpMsg, error) { + dbPool, err := h.DBPool(ctx) + if err != nil { + return nil, lazyerrors.Error(err) + } + + document, err := msg.Document() + if err != nil { + return nil, lazyerrors.Error(err) + } + + common.Ignored(document, h.L, "writeConcern", "comment") + + command := document.Command() + + db, err := common.GetRequiredParam[string](document, "$db") + if err != nil { + return nil, err + } + + collection, err := common.GetRequiredParam[string](document, command) + if err != nil { + return nil, err + } + + var nIndexesWas int32 + var responseMsg string + + err = dbPool.InTransactionRetry(ctx, func(tx pgx.Tx) error { + nIndexesWas, responseMsg, err = processIndexDrop(ctx, tx, db, collection, document, command) + return err + }) + + switch { + case err == nil: + // nothing + case errors.Is(err, pgdb.ErrTableNotExist): + return nil, commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrNamespaceNotFound, + fmt.Sprintf("ns not found %s.%s", db, collection), + command, + ) + case errors.Is(err, pgdb.ErrIndexCannotDelete): + return nil, commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrInvalidOptions, + "cannot drop _id index", + command, + ) + default: + return nil, lazyerrors.Error(err) + } + + replyDoc := must.NotFail(types.NewDocument( + "nIndexesWas", nIndexesWas, + )) + + if responseMsg != "" { + replyDoc.Set("msg", responseMsg) + } + + replyDoc.Set( + "ok", float64(1), + ) + + var reply wire.OpMsg + must.NoError(reply.SetSections(wire.OpMsgSection{ + Documents: []*types.Document{replyDoc}, + })) + + return &reply, nil +} + +// processIndexDrop parses the index document and processes index deletion based on the provided params. +// Upon a successful drop, it returns the number of indexes that existed in the database +// before the drop, and a message about the dropped indexes, if any. +func processIndexDrop(ctx context.Context, tx pgx.Tx, db, collection string, doc *types.Document, command string) (int32, string, error) { //nolint:lll // for readability + v, err := doc.Get("index") + if err != nil { + return 0, "", commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrMissingField, + "BSON field 'dropIndexes.index' is missing but a required field", + command, + ) + } + + var nIndexesWas int32 + + switch v := v.(type) { + case *types.Document: + // Index specification (key) is provided to drop a specific index.
+ var indexKey pgdb.IndexKey + + indexKey, err = processIndexKey(v) + if err != nil { + return 0, "", lazyerrors.Error(err) + } + + nIndexesWas, err = pgdb.DropIndex(ctx, tx, db, collection, &pgdb.Index{Key: indexKey}) + + switch { + case err == nil: + return nIndexesWas, "", nil + case errors.Is(err, pgdb.ErrIndexNotExist): + return 0, "", commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrIndexNotFound, + fmt.Sprintf("can't find index with key: %s", types.FormatAnyValue(v)), + command, + ) + default: + return 0, "", lazyerrors.Error(err) + } + case *types.Array: + // List of index names is provided to drop multiple indexes. + iter := v.Iterator() + + defer iter.Close() // It's safe to defer here as the iterator reads everything. + + for { + var val any + _, val, err = iter.Next() + + switch { + case err == nil: + // nothing + case errors.Is(err, iterator.ErrIteratorDone): + return nIndexesWas, "", nil + default: + return 0, "", lazyerrors.Error(err) + } + + index, ok := val.(string) + if !ok { + return 0, "", commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrTypeMismatch, + fmt.Sprintf( + "BSON field 'dropIndexes.index' is the wrong type '%s', expected types '[string, object]'", + pjson.GetTypeOfValue(v), + ), + command, + ) + } + + var nsIndexesWasCurrent int32 + nsIndexesWasCurrent, err = pgdb.DropIndex(ctx, tx, db, collection, &pgdb.Index{Name: index}) + + // nIndexesWas should give the number of indexes we had in the database before we start dropping indexes. + if nIndexesWas == 0 { + nIndexesWas = nsIndexesWasCurrent + } + + switch { + case err == nil: + continue + case errors.Is(err, pgdb.ErrIndexNotExist): + return 0, "", commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrIndexNotFound, + fmt.Sprintf("index not found with name [%s]", index), + command, + ) + + default: + return 0, "", lazyerrors.Error(err) + } + } + case string: + if v == "*" { + // Drop all indexes except the _id index. + nIndexesWas, err = pgdb.DropAllIndexes(ctx, tx, db, collection) + if err != nil { + return 0, "", lazyerrors.Error(err) + } + + return nIndexesWas, "non-_id indexes dropped for collection", nil + } + + // Index name is provided to drop a specific index. + nIndexesWas, err = pgdb.DropIndex(ctx, tx, db, collection, &pgdb.Index{Name: v}) + + switch { + case err == nil: + return nIndexesWas, "", nil + case errors.Is(err, pgdb.ErrIndexNotExist): + return 0, "", commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrIndexNotFound, + fmt.Sprintf("index not found with name [%s]", v), + command, + ) + default: + return 0, "", lazyerrors.Error(err) + } + } + + return 0, "", commonerrors.NewCommandErrorMsgWithArgument( + commonerrors.ErrTypeMismatch, + fmt.Sprintf( + "BSON field 'dropIndexes.index' is the wrong type '%s', expected types '[string, object]'", + pjson.GetTypeOfValue(v), + ), + command, + ) +} diff --git a/internal/handlers/pg/pgdb/indexes.go b/internal/handlers/pg/pgdb/indexes.go index cee8c844cac4..4476460997f3 100644 --- a/internal/handlers/pg/pgdb/indexes.go +++ b/internal/handlers/pg/pgdb/indexes.go @@ -80,6 +80,103 @@ func CreateIndexIfNotExists(ctx context.Context, tx pgx.Tx, db, collection strin return nil } +// Equal returns true if the given index key is equal to the current one. +func (k IndexKey) Equal(v IndexKey) bool { + if len(k) != len(v) { + return false + } + + for i := range k { + if k[i] != v[i] { + return false + } + } + + return true +} + +// DropIndex drops the given index. If the index does not exist, it returns ErrIndexNotExist.
+func DropIndex(ctx context.Context, tx pgx.Tx, db, collection string, index *Index) (int32, error) { + ms := newMetadataStorage(tx, db, collection) + + metadata, err := ms.get(ctx, true) + if err != nil { + return 0, err + } + + nIndexesWas := int32(len(metadata.indexes)) + + for i := nIndexesWas - 1; i >= 0; i-- { + current := metadata.indexes[i] + + var deleteCurrentIndex bool + + if index.Name != "" { + // delete by name + deleteCurrentIndex = current.Name == index.Name + } else { + // delete by key + deleteCurrentIndex = current.Key.Equal(index.Key) + } + + if !deleteCurrentIndex { + continue + } + + if current.Name == "_id_" { + // cannot delete _id index + return 0, ErrIndexCannotDelete + } + + if err = dropPgIndex(ctx, tx, db, current.pgIndex); err != nil { + return 0, lazyerrors.Error(err) + } + + // remove i-th element from the slice + metadata.indexes = append(metadata.indexes[:i], metadata.indexes[i+1:]...) + + if err := ms.set(ctx, metadata); err != nil { + return 0, lazyerrors.Error(err) + } + + return nIndexesWas, nil + } + + // Did not find the index to delete + return 0, ErrIndexNotExist +} + +// DropAllIndexes deletes all indexes on the collection except the _id index. +func DropAllIndexes(ctx context.Context, tx pgx.Tx, db, collection string) (int32, error) { + ms := newMetadataStorage(tx, db, collection) + + metadata, err := ms.get(ctx, true) + if err != nil { + return 0, lazyerrors.Error(err) + } + + nIndexesWas := int32(len(metadata.indexes)) + + for i := nIndexesWas - 1; i >= 0; i-- { + if metadata.indexes[i].Name == "_id_" { + continue + } + + if err = dropPgIndex(ctx, tx, db, metadata.indexes[i].pgIndex); err != nil { + return 0, lazyerrors.Error(err) + } + + // remove i-th element from the slice + metadata.indexes = append(metadata.indexes[:i], metadata.indexes[i+1:]...) + } + + if err := ms.set(ctx, metadata); err != nil { + return 0, lazyerrors.Error(err) + } + + return nIndexesWas, nil +} + // createPgIndexIfNotExists creates a new index for the given params if it does not exist. func createPgIndexIfNotExists(ctx context.Context, tx pgx.Tx, schema, table, index string, fields IndexKey, isUnique bool) error { if len(fields) == 0 { @@ -121,6 +218,19 @@ func createPgIndexIfNotExists(ctx context.Context, tx pgx.Tx, schema, table, ind return nil } +// dropPgIndex drops the given index. +func dropPgIndex(ctx context.Context, tx pgx.Tx, schema, index string) error { + var err error + + sql := `DROP INDEX ` + pgx.Identifier{schema, index}.Sanitize() + + if _, err = tx.Exec(ctx, sql); err != nil { + return lazyerrors.Error(err) + } + + return nil +} + // quoteString returns a string that is safe to use in SQL queries. // // Deprecated: Warning! Avoid using this function unless there is no other way. diff --git a/internal/handlers/pg/pgdb/indexes_test.go b/internal/handlers/pg/pgdb/indexes_test.go index a13f85f94fda..306a1758c730 100644 --- a/internal/handlers/pg/pgdb/indexes_test.go +++ b/internal/handlers/pg/pgdb/indexes_test.go @@ -15,7 +15,10 @@ package pgdb import ( + "errors" "fmt" + "runtime" + "sync" "testing" "github.com/jackc/pgx/v4" @@ -63,3 +66,295 @@ func TestCreateIndexIfNotExists(t *testing.T) { ) assert.Equal(t, expectedIndexdef, indexdef) } + +// TestDropIndexes checks that we correctly drop indexes for various combinations of existing indexes.
+func TestDropIndexes(t *testing.T) { + ctx := testutil.Ctx(t) + pool := getPool(ctx, t) + + databaseName := testutil.DatabaseName(t) + collectionName := testutil.CollectionName(t) + setupDatabase(ctx, t, pool, databaseName) + + err := pool.InTransaction(ctx, func(tx pgx.Tx) error { + return CreateCollectionIfNotExists(ctx, tx, databaseName, collectionName) + }) + require.NoError(t, err) + + for name, tc := range map[string]struct { + expectedErr error // expected error, if any + toCreate []Index // indexes to create before dropping + toDrop []Index // indexes to drop + expected []Index // expected indexes to remain after dropping attempt + }{ + "NonExistent": { + toCreate: []Index{}, + toDrop: []Index{{Name: "foo_1"}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + }, + expectedErr: ErrIndexNotExist, + }, + "DropOneByName": { + toCreate: []Index{ + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + }, + toDrop: []Index{{Name: "foo_1"}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + }, + }, + "DropOneByKey": { + toCreate: []Index{ + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + }, + toDrop: []Index{{Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + }, + }, + "DropOneFromTheBeginning": { + toCreate: []Index{ + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + {Name: "bar_1", Key: []IndexKeyPair{{Field: "bar", Order: types.Ascending}}}, + {Name: "car_1", Key: []IndexKeyPair{{Field: "car", Order: types.Ascending}}}, + }, + toDrop: []Index{{Name: "foo_1"}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + {Name: "bar_1", Key: []IndexKeyPair{{Field: "bar", Order: types.Ascending}}}, + {Name: "car_1", Key: []IndexKeyPair{{Field: "car", Order: types.Ascending}}}, + }, + }, + "DropOneFromTheMiddle": { + toCreate: []Index{ + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + {Name: "bar_1", Key: []IndexKeyPair{{Field: "bar", Order: types.Ascending}}}, + {Name: "car_1", Key: []IndexKeyPair{{Field: "car", Order: types.Ascending}}}, + }, + toDrop: []Index{{Name: "bar_1"}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + {Name: "car_1", Key: []IndexKeyPair{{Field: "car", Order: types.Ascending}}}, + }, + }, + "DropOneFromTheEnd": { + toCreate: []Index{ + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + {Name: "bar_1", Key: []IndexKeyPair{{Field: "bar", Order: types.Ascending}}}, + {Name: "car_1", Key: []IndexKeyPair{{Field: "car", Order: types.Ascending}}}, + }, + toDrop: []Index{{Name: "car_1"}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + {Name: "bar_1", Key: []IndexKeyPair{{Field: "bar", Order: types.Ascending}}}, + }, + }, + "DropTwo": { + toCreate: []Index{ + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + {Name: "bar_1", Key: []IndexKeyPair{{Field: "bar", Order: types.Ascending}}}, + {Name: 
"car_1", Key: []IndexKeyPair{{Field: "car", Order: types.Ascending}}}, + }, + toDrop: []Index{{Name: "car_1"}, {Name: "foo_1"}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + {Name: "bar_1", Key: []IndexKeyPair{{Field: "bar", Order: types.Ascending}}}, + }, + }, + "DropComplicated": { + toCreate: []Index{ + {Name: "v_-1", Key: []IndexKeyPair{{Field: "v", Order: types.Descending}}}, + {Name: "v_1_foo_1", Key: []IndexKeyPair{{Field: "v", Order: types.Ascending}, {Field: "foo", Order: types.Ascending}}}, //nolint:lll // for readability + {Name: "v.foo_-1", Key: []IndexKeyPair{{Field: "v.foo", Order: types.Descending}}}, + }, + toDrop: []Index{{Name: "v_-1"}, {Name: "v_1_foo_1"}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + {Name: "v.foo_-1", Key: []IndexKeyPair{{Field: "v.foo", Order: types.Descending}}}, + }, + }, + "DropAll": { + toCreate: []Index{ + {Name: "foo_1", Key: []IndexKeyPair{{Field: "foo", Order: types.Ascending}}}, + {Name: "bar_1", Key: []IndexKeyPair{{Field: "bar", Order: types.Ascending}}}, + {Name: "car_1", Key: []IndexKeyPair{{Field: "car", Order: types.Ascending}}}, + }, + toDrop: []Index{{Name: "bar_1"}, {Name: "car_1"}, {Name: "foo_1"}}, + expected: []Index{ + {Name: "_id_", Key: []IndexKeyPair{{Field: "_id", Order: types.Ascending}}, Unique: true}, + }, + }, + } { + tc := tc + + // We don't run this subtest in parallel because we use the same database and collection. + t.Run(name, func(t *testing.T) { + t.Helper() + + err := pool.InTransaction(ctx, func(tx pgx.Tx) error { + for _, idx := range tc.toCreate { + if err := CreateIndexIfNotExists(ctx, tx, databaseName, collectionName, &idx); err != nil { + return err + } + } + + return nil + }) + require.NoError(t, err) + + expectedWas := int32(len(tc.toCreate) + 1) // created indexes + default _id index + err = pool.InTransaction(ctx, func(tx pgx.Tx) error { + for _, idx := range tc.toDrop { + var was int32 + was, err = DropIndex(ctx, tx, databaseName, collectionName, &idx) + if err != nil { + return err + } + + assert.Equal(t, expectedWas, was) + expectedWas-- + } + + return nil + }) + + if tc.expectedErr != nil { + assert.True(t, errors.Is(err, tc.expectedErr)) + } else { + require.NoError(t, err) + } + + err = pool.InTransaction(ctx, func(tx pgx.Tx) error { + var indexes []Index + + indexes, err = Indexes(ctx, tx, databaseName, collectionName) + if err != nil { + return err + } + + assert.Equal(t, tc.expected, indexes) + return nil + }) + require.NoError(t, err) + + err = pool.InTransaction(ctx, func(tx pgx.Tx) error { + var was int32 + was, err = DropAllIndexes(ctx, tx, databaseName, collectionName) + if err != nil { + return err + } + + assert.Equal(t, expectedWas, was) + + var indexes []Index + indexes, err = Indexes(ctx, tx, databaseName, collectionName) + if err != nil { + return err + } + + assert.Len(t, indexes, 1) // only default _id index left + return nil + }) + require.NoError(t, err) + }) + } +} + +func TestDropIndexesStress(t *testing.T) { + ctx := testutil.Ctx(t) + pool := getPool(ctx, t) + + databaseName := testutil.DatabaseName(t) + collectionName := testutil.CollectionName(t) + setupDatabase(ctx, t, pool, databaseName) + + var initialIndexes []Index + var err error + + err = pool.InTransactionRetry(ctx, func(tx pgx.Tx) error { + if err = CreateCollection(ctx, tx, databaseName, collectionName); err != nil { + return err + } + + initialIndexes, err = Indexes(ctx, tx, 
databaseName, collectionName) + return err + }) + require.NoError(t, err) + + indexName := "test" + indexKeys := []IndexKeyPair{{Field: "foo", Order: types.Ascending}, {Field: "bar", Order: types.Descending}} + + err = pool.InTransaction(ctx, func(tx pgx.Tx) error { + idx := Index{ + Name: indexName, + Key: indexKeys, + } + + return CreateIndexIfNotExists(ctx, tx, databaseName, collectionName, &idx) + }) + require.NoError(t, err) + + var indexesAfterCreate []Index + + err = pool.InTransactionRetry(ctx, func(tx pgx.Tx) error { + indexesAfterCreate, err = Indexes(ctx, tx, databaseName, collectionName) + return err + }) + require.NoError(t, err) + + dropNum := runtime.GOMAXPROCS(-1) * 10 + + ready := make(chan struct{}, dropNum) + start := make(chan struct{}) + + var wg sync.WaitGroup + for i := 0; i <= dropNum; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + ready <- struct{}{} + + <-start + + // do not use `err`, to avoid data race + tErr := pool.InTransaction(ctx, func(tx pgx.Tx) error { + idx := Index{ + Name: indexName, + Key: indexKeys, + } + + // do not use `err`, to avoid data race + _, dropErr := DropIndex(ctx, tx, databaseName, collectionName, &idx) + return dropErr + }) + // if the index could not be dropped, the error is checked + if tErr != nil { + require.Error(t, tErr, ErrIndexNotExist) + } + }() + } + + for i := 0; i < dropNum; i++ { + <-ready + } + + close(start) + + wg.Wait() + + var indexesAfterDrop []Index + + err = pool.InTransactionRetry(ctx, func(tx pgx.Tx) error { + indexesAfterDrop, err = Indexes(ctx, tx, databaseName, collectionName) + return err + }) + require.NoError(t, err) + + require.Equal(t, initialIndexes, indexesAfterDrop) + require.NotEqual(t, indexesAfterCreate, indexesAfterDrop) +} diff --git a/internal/handlers/pg/pgdb/pgdb.go b/internal/handlers/pg/pgdb/pgdb.go index 7ced127c1e3b..e2e69a2e3e07 100644 --- a/internal/handlers/pg/pgdb/pgdb.go +++ b/internal/handlers/pg/pgdb/pgdb.go @@ -36,6 +36,12 @@ var ( // ErrIndexNameAlreadyExist indicates that an index name already exists with a different key. ErrIndexNameAlreadyExist = fmt.Errorf("index name already exists with a different key") + // ErrIndexNotExist indicates there is no such index. + ErrIndexNotExist = fmt.Errorf("index does not exist") + + // ErrIndexCannotDelete indicates the index cannot be deleted. + ErrIndexCannotDelete = fmt.Errorf("index cannot be deleted") + // ErrInvalidCollectionName indicates that a collection didn't pass name checks. ErrInvalidCollectionName = fmt.Errorf("invalid FerretDB collection name") diff --git a/internal/handlers/tigris/msg_dropindexes.go b/internal/handlers/tigris/msg_dropindexes.go new file mode 100644 index 000000000000..e5cc4c00c934 --- /dev/null +++ b/internal/handlers/tigris/msg_dropindexes.go @@ -0,0 +1,45 @@ +// Copyright 2021 FerretDB Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tigris + +import ( + "context" + + "github.com/FerretDB/FerretDB/internal/handlers/common" + "github.com/FerretDB/FerretDB/internal/types" + "github.com/FerretDB/FerretDB/internal/util/must" + "github.com/FerretDB/FerretDB/internal/wire" +) + +// MsgDropIndexes implements HandlerInterface. +func (h *Handler) MsgDropIndexes(ctx context.Context, msg *wire.OpMsg) (*wire.OpMsg, error) { + // TODO https://github.com/FerretDB/FerretDB/issues/78 + + document, err := msg.Document() + if err != nil { + return nil, err + } + + common.Ignored(document, h.L, "writeConcern", "commitQuorum", "comment") + + var reply wire.OpMsg + must.NoError(reply.SetSections(wire.OpMsgSection{ + Documents: []*types.Document{must.NotFail(types.NewDocument( + "ok", float64(1), + ))}, + })) + + return &reply, nil +} diff --git a/website/docs/reference/supported-commands.md b/website/docs/reference/supported-commands.md index 387753952c10..afe5cb7df20a 100644 --- a/website/docs/reference/supported-commands.md +++ b/website/docs/reference/supported-commands.md @@ -535,7 +535,7 @@ db.aggregate() ## Administration commands | Command | Argument / Option | Property | Status | Comments | -| --------------------------------- | ------------------------------ | ------------------------- | ------ | --------------------------------------------------------- | +| --------------------------------- | ------------------------------ |---------------------------| ------ | --------------------------------------------------------- | | `listCollections` | | | ✅ | Basic command is fully supported | | | `filter` | | ✅ | | | | `nameOnly` | | ❌ | [Issue](https://github.com/FerretDB/FerretDB/issues/301) | @@ -633,8 +633,8 @@ db.aggregate() | `dropConnections` | | | ❌ | [Issue](https://github.com/FerretDB/FerretDB/issues/1511) | | | `hostAndPort` | | ⚠️ | | | | `comment` | | ⚠️ | | -| `dropIndexes` | | | ❌ | [Issue](https://github.com/FerretDB/FerretDB/issues/1512) | -| | `index` | | ⚠️ | | +| `dropIndexes` | | | ✅ | | +| | `index` | | ✅ | | | | `writeConcern` | | ⚠️ | | | | `comment` | | ⚠️ | | | `filemd5` | | | ❌ | |