[databases]: add support for Opensearch advanced configuration (digitalocean#1588)

* [databases]: add support for Opensearch advanced configuration

* add PluginsAlertingFilterByBackendRolesEnabled to displayers
loosla authored Oct 3, 2024
1 parent de1fb73 commit 6804c7d
Showing 11 changed files with 552 additions and 16 deletions.
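
In practical terms, the change teaches the existing configuration get and update commands a sixth engine value: opensearch joins pg, mysql, redis, mongodb, and kafka for the engine flag behind doctl.ArgDatabaseEngine, with updates flowing through the existing doctl.ArgDatabaseConfigJson JSON payload, as the diffs below show.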
42 changes: 30 additions & 12 deletions commands/databases.go
@@ -2444,6 +2444,7 @@ This command functions as a PATCH request, meaning that only the specified field
displayerType(&displayers.RedisConfiguration{}),
displayerType(&displayers.MongoDBConfiguration{}),
displayerType(&displayers.KafkaConfiguration{}),
displayerType(&displayers.OpensearchConfiguration{}),
)
AddStringFlag(
getDatabaseCfgCommand,
@@ -2498,14 +2499,15 @@ func RunDatabaseConfigurationGet(c *CmdConfig) error {
}

allowedEngines := map[string]any{
"mysql": nil,
"pg": nil,
"redis": nil,
"mongodb": nil,
"kafka": nil,
"mysql": nil,
"pg": nil,
"redis": nil,
"mongodb": nil,
"kafka": nil,
"opensearch": nil,
}
if _, ok := allowedEngines[engine]; !ok {
return fmt.Errorf("(%s) command: engine must be one of: 'pg', 'mysql', 'redis', 'mongodb', 'kafka', 'opensearch'", c.NS)
}

dbId := args[0]
@@ -2559,6 +2561,16 @@ func RunDatabaseConfigurationGet(c *CmdConfig) error {
KafkaConfig: *config,
}
return c.Display(&displayer)
} else if engine == "opensearch" {
config, err := c.Databases().GetOpensearchConfiguration(dbId)
if err != nil {
return err
}

displayer := displayers.OpensearchConfiguration{
OpensearchConfig: *config,
}
return c.Display(&displayer)
}

return nil
@@ -2579,14 +2591,15 @@ func RunDatabaseConfigurationUpdate(c *CmdConfig) error {
}

allowedEngines := map[string]any{
"mysql": nil,
"pg": nil,
"redis": nil,
"mongodb": nil,
"kafka": nil,
"mysql": nil,
"pg": nil,
"redis": nil,
"mongodb": nil,
"kafka": nil,
"opensearch": nil,
}
if _, ok := allowedEngines[engine]; !ok {
return fmt.Errorf("(%s) command: engine must be one of: 'pg', 'mysql', 'redis', 'mongodb', 'kafka', 'opensearch'", c.NS)
}

configJson, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseConfigJson)
@@ -2620,6 +2633,11 @@ func RunDatabaseConfigurationUpdate(c *CmdConfig) error {
if err != nil {
return err
}
} else if engine == "opensearch" {
err := c.Databases().UpdateOpensearchConfiguration(dbId, configJson)
if err != nil {
return err
}
}

return nil
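
Both the get and update paths above gate on the same allow-list before touching the API. For illustration only, here is a minimal, self-contained sketch of that map-as-set idiom as extended by this commit; the standalone function and main are editorial stand-ins, not the command wiring itself:

package main

import "fmt"

// allowedEngines mirrors the allow-list in RunDatabaseConfigurationGet and
// RunDatabaseConfigurationUpdate: map keys act as a set, values are unused.
var allowedEngines = map[string]any{
	"mysql":      nil,
	"pg":         nil,
	"redis":      nil,
	"mongodb":    nil,
	"kafka":      nil,
	"opensearch": nil,
}

// validateEngine reports an error for any engine outside the set.
func validateEngine(engine string) error {
	if _, ok := allowedEngines[engine]; !ok {
		return fmt.Errorf("engine must be one of: 'pg', 'mysql', 'redis', 'mongodb', 'kafka', 'opensearch'")
	}
	return nil
}

func main() {
	fmt.Println(validateEngine("opensearch")) // <nil>
	fmt.Println(validateEngine("cassandra"))  // error
}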
24 changes: 24 additions & 0 deletions commands/databases_test.go
@@ -219,6 +219,10 @@ var (
KafkaConfig: &godo.KafkaConfig{},
}

testOpensearchConfiguration = do.OpensearchConfig{
OpensearchConfig: &godo.OpensearchConfig{},
}

topicReplicationFactor = uint32(3)
testKafkaTopic = do.DatabaseTopic{
DatabaseTopic: &godo.DatabaseTopic{
@@ -1680,6 +1684,16 @@ func TestDatabaseConfigurationGet(t *testing.T) {
assert.NoError(t, err)
})

withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.databases.EXPECT().GetOpensearchConfiguration(testDBCluster.ID).Return(&testOpensearchConfiguration, nil)
config.Args = append(config.Args, testDBCluster.ID)
config.Doit.Set(config.NS, doctl.ArgDatabaseEngine, "opensearch")

err := RunDatabaseConfigurationGet(config)

assert.NoError(t, err)
})

withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
err := RunDatabaseConfigurationGet(config)

@@ -1754,6 +1768,16 @@ func TestDatabaseConfigurationUpdate(t *testing.T) {
assert.NoError(t, err)
})

withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.databases.EXPECT().UpdateOpensearchConfiguration(testDBCluster.ID, "").Return(nil)
config.Args = append(config.Args, testDBCluster.ID)
config.Doit.Set(config.NS, doctl.ArgDatabaseEngine, "opensearch")

err := RunDatabaseConfigurationUpdate(config)

assert.NoError(t, err)
})

withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
err := RunDatabaseConfigurationUpdate(config)

259 changes: 259 additions & 0 deletions commands/displayers/database.go
@@ -1872,6 +1872,265 @@ func (dc *KafkaConfiguration) KV() []map[string]any {
return o
}

type OpensearchConfiguration struct {
OpensearchConfig do.OpensearchConfig
}

var _ Displayable = &OpensearchConfiguration{}

func (dc *OpensearchConfiguration) JSON(out io.Writer) error {
return writeJSON(dc.OpensearchConfig, out)
}

func (dc *OpensearchConfiguration) Cols() []string {
return []string{
"key",
"value",
}
}

func (dc *OpensearchConfiguration) ColMap() map[string]string {
return map[string]string{
"key": "key",
"value": "value",
}
}

func (dc *OpensearchConfiguration) KV() []map[string]any {
c := dc.OpensearchConfig
o := []map[string]any{}
if c.HttpMaxContentLengthBytes != nil {
o = append(o, map[string]any{
"key": "HttpMaxContentLengthBytes",
"value": *c.HttpMaxContentLengthBytes,
})
}
if c.HttpMaxHeaderSizeBytes != nil {
o = append(o, map[string]any{
"key": "HttpMaxHeaderSizeBytes",
"value": *c.HttpMaxHeaderSizeBytes,
})
}
if c.HttpMaxInitialLineLengthBytes != nil {
o = append(o, map[string]any{
"key": "HttpMaxInitialLineLengthBytes",
"value": *c.HttpMaxInitialLineLengthBytes,
})
}
if c.IndicesQueryBoolMaxClauseCount != nil {
o = append(o, map[string]any{
"key": "IndicesQueryBoolMaxClauseCount",
"value": *c.IndicesQueryBoolMaxClauseCount,
})
}
if c.IndicesFielddataCacheSizePercentage != nil {
o = append(o, map[string]any{
"key": "IndicesFielddataCacheSizePercentage",
"value": *c.IndicesFielddataCacheSizePercentage,
})
}
if c.IndicesMemoryIndexBufferSizePercentage != nil {
o = append(o, map[string]any{
"key": "IndicesMemoryIndexBufferSizePercentage",
"value": *c.IndicesMemoryIndexBufferSizePercentage,
})
}
if c.IndicesMemoryMinIndexBufferSizeMb != nil {
o = append(o, map[string]any{
"key": "IndicesMemoryMinIndexBufferSizeMb",
"value": *c.IndicesMemoryMinIndexBufferSizeMb,
})
}
if c.IndicesMemoryMaxIndexBufferSizeMb != nil {
o = append(o, map[string]any{
"key": "IndicesMemoryMaxIndexBufferSizeMb",
"value": *c.IndicesMemoryMaxIndexBufferSizeMb,
})
}
if c.IndicesQueriesCacheSizePercentage != nil {
o = append(o, map[string]any{
"key": "IndicesQueriesCacheSizePercentage",
"value": *c.IndicesQueriesCacheSizePercentage,
})
}
if c.IndicesRecoveryMaxMbPerSec != nil {
o = append(o, map[string]any{
"key": "IndicesRecoveryMaxMbPerSec",
"value": *c.IndicesRecoveryMaxMbPerSec,
})
}
if c.IndicesRecoveryMaxConcurrentFileChunks != nil {
o = append(o, map[string]any{
"key": "IndicesRecoveryMaxConcurrentFileChunks",
"value": *c.IndicesRecoveryMaxConcurrentFileChunks,
})
}
if c.ThreadPoolSearchSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolSearchSize",
"value": *c.ThreadPoolSearchSize,
})
}
if c.ThreadPoolSearchThrottledSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolSearchThrottledSize",
"value": *c.ThreadPoolSearchThrottledSize,
})
}
if c.ThreadPoolGetSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolGetSize",
"value": *c.ThreadPoolGetSize,
})
}
if c.ThreadPoolAnalyzeSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolAnalyzeSize",
"value": *c.ThreadPoolAnalyzeSize,
})
}
if c.ThreadPoolWriteSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolWriteSize",
"value": *c.ThreadPoolWriteSize,
})
}
if c.ThreadPoolForceMergeSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolForceMergeSize",
"value": *c.ThreadPoolForceMergeSize,
})
}
if c.ThreadPoolSearchQueueSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolSearchQueueSize",
"value": *c.ThreadPoolSearchQueueSize,
})
}
if c.ThreadPoolSearchThrottledQueueSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolSearchThrottledQueueSize",
"value": *c.ThreadPoolSearchThrottledQueueSize,
})
}
if c.ThreadPoolGetQueueSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolGetQueueSize",
"value": *c.ThreadPoolGetQueueSize,
})
}
if c.ThreadPoolAnalyzeQueueSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolAnalyzeQueueSize",
"value": *c.ThreadPoolAnalyzeQueueSize,
})
}
if c.ThreadPoolWriteQueueSize != nil {
o = append(o, map[string]any{
"key": "ThreadPoolWriteQueueSize",
"value": *c.ThreadPoolWriteQueueSize,
})
}
if c.IsmEnabled != nil {
o = append(o, map[string]any{
"key": "IsmEnabled",
"value": *c.IsmEnabled,
})
}
if c.IsmHistoryEnabled != nil {
o = append(o, map[string]any{
"key": "IsmHistoryEnabled",
"value": *c.IsmHistoryEnabled,
})
}
if c.IsmHistoryMaxAgeHours != nil {
o = append(o, map[string]any{
"key": "IsmHistoryMaxAgeHours",
"value": *c.IsmHistoryMaxAgeHours,
})
}
if c.IsmHistoryMaxDocs != nil {
o = append(o, map[string]any{
"key": "IsmHistoryMaxDocs",
"value": *c.IsmHistoryMaxDocs,
})
}
if c.IsmHistoryRolloverCheckPeriodHours != nil {
o = append(o, map[string]any{
"key": "IsmHistoryRolloverCheckPeriodHours",
"value": *c.IsmHistoryRolloverCheckPeriodHours,
})
}
if c.IsmHistoryRolloverRetentionPeriodDays != nil {
o = append(o, map[string]any{
"key": "IsmHistoryRolloverRetentionPeriodDays",
"value": *c.IsmHistoryRolloverRetentionPeriodDays,
})
}
if c.SearchMaxBuckets != nil {
o = append(o, map[string]any{
"key": "SearchMaxBuckets",
"value": *c.SearchMaxBuckets,
})
}
if c.ActionAutoCreateIndexEnabled != nil {
o = append(o, map[string]any{
"key": "ActionAutoCreateIndexEnabled",
"value": *c.ActionAutoCreateIndexEnabled,
})
}
if c.EnableSecurityAudit != nil {
o = append(o, map[string]any{
"key": "EnableSecurityAudit",
"value": *c.EnableSecurityAudit,
})
}
if c.ActionDestructiveRequiresName != nil {
o = append(o, map[string]any{
"key": "ActionDestructiveRequiresName",
"value": *c.ActionDestructiveRequiresName,
})
}
if c.ClusterMaxShardsPerNode != nil {
o = append(o, map[string]any{
"key": "ClusterMaxShardsPerNode",
"value": *c.ClusterMaxShardsPerNode,
})
}
if c.OverrideMainResponseVersion != nil {
o = append(o, map[string]any{
"key": "OverrideMainResponseVersion",
"value": *c.OverrideMainResponseVersion,
})
}
if c.ScriptMaxCompilationsRate != nil {
o = append(o, map[string]any{
"key": "ScriptMaxCompilationsRate",
"value": *c.ScriptMaxCompilationsRate,
})
}
if c.ClusterRoutingAllocationNodeConcurrentRecoveries != nil {
o = append(o, map[string]any{
"key": "ClusterRoutingAllocationNodeConcurrentRecoveries",
"value": *c.ClusterRoutingAllocationNodeConcurrentRecoveries,
})
}
if c.ReindexRemoteWhitelist != nil {
o = append(o, map[string]any{
"key": "ReindexRemoteWhitelist",
"value": c.ReindexRemoteWhitelist,
})
}
if c.PluginsAlertingFilterByBackendRolesEnabled != nil {
o = append(o, map[string]any{
"key": "PluginsAlertingFilterByBackendRolesEnabled",
"value": *c.PluginsAlertingFilterByBackendRolesEnabled,
})
}

return o
}

type DatabaseEvents struct {
DatabaseEvents do.DatabaseEvents
}
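
For readers unfamiliar with the displayer convention, here is a hedged, self-contained sketch of the nil-guarded key/value pattern that OpensearchConfiguration.KV follows above; the two-field config struct and the main function are illustrative stand-ins, not godo's actual types:

package main

import "fmt"

// Stand-in for godo.OpensearchConfig: optional settings are pointers so an
// unset field (nil) can be told apart from a zero value.
type opensearchConfig struct {
	HttpMaxContentLengthBytes *int
	IsmEnabled                *bool
}

// kv mirrors the displayer's KV method: emit one key/value row per field,
// and only when the field was actually set.
func kv(c opensearchConfig) []map[string]any {
	o := []map[string]any{}
	if c.HttpMaxContentLengthBytes != nil {
		o = append(o, map[string]any{"key": "HttpMaxContentLengthBytes", "value": *c.HttpMaxContentLengthBytes})
	}
	if c.IsmEnabled != nil {
		o = append(o, map[string]any{"key": "IsmEnabled", "value": *c.IsmEnabled})
	}
	return o
}

func main() {
	size := 104857600
	// Only the set field produces a row; IsmEnabled stays nil and is skipped.
	for _, row := range kv(opensearchConfig{HttpMaxContentLengthBytes: &size}) {
		fmt.Printf("%s\t%v\n", row["key"], row["value"])
	}
}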