
Add distribution points intake endpoint (#1518)
* fix example generation

* attempt to fix example

* use depth

* remove item created in merge

* fix examples

* remove extra new lines

* Regenerate client from commit 4569968f of spec repo

Co-authored-by: Sherzod K <sherzod.karimov@datadoghq.com>
Co-authored-by: api-clients-generation-pipeline[bot] <54105614+api-clients-generation-pipeline[bot]@users.noreply.github.com>
Co-authored-by: ci.datadog-api-spec <packages@datadoghq.com>
3 people authored Jun 13, 2022
1 parent 7d9d871 commit e9bbc95
Showing 13 changed files with 1,282 additions and 8 deletions.
8 changes: 4 additions & 4 deletions .apigentools-info
@@ -4,13 +4,13 @@
"spec_versions": {
"v1": {
"apigentools_version": "1.6.2",
"regenerated": "2022-06-09 11:12:46.854122",
"spec_repo_commit": "b9e36dc8"
"regenerated": "2022-06-10 18:23:29.147073",
"spec_repo_commit": "4569968f"
},
"v2": {
"apigentools_version": "1.6.2",
"regenerated": "2022-06-09 11:12:46.864763",
"spec_repo_commit": "b9e36dc8"
"regenerated": "2022-06-10 18:23:29.159386",
"spec_repo_commit": "4569968f"
}
}
}
158 changes: 158 additions & 0 deletions .generator/schemas/v1/openapi.yaml
@@ -1299,6 +1299,96 @@ components:
type: integer
readOnly: true
type: object
DistributionPoint:
description: Array of distribution points.
example:
- 1575317847.0
- - 0.5
- 1.0
items:
description: List of distribution point.
oneOf:
- $ref: '#/components/schemas/DistributionPointTimestamp'
- $ref: '#/components/schemas/DistributionPointData'
maxItems: 2
minItems: 2
type: array
DistributionPointData:
description: Distribution point data.
items:
description: List of distribution point data.
format: double
type: number
type: array
DistributionPointTimestamp:
description: Distribution point timestamp. It should be in seconds and current.
format: double
type: number
DistributionPointsContentEncoding:
description: HTTP header used to compress the media-type.
enum:
- deflate
type: string
x-enum-varnames:
- DEFLATE
DistributionPointsPayload:
description: The distribution points payload.
properties:
series:
description: A list of distribution points series to submit to Datadog.
example:
- metric: system.load.1
points:
- - 1475317847.0
- - 1.0
- 2.0
items:
$ref: '#/components/schemas/DistributionPointsSeries'
type: array
required:
- series
type: object
DistributionPointsSeries:
description: A distribution points metric to submit to Datadog.
properties:
host:
description: The name of the host that produced the distribution point metric.
example: test.example.com
type: string
metric:
description: The name of the distribution points metric.
example: system.load.1
type: string
points:
description: Points relating to the distribution point metric. All points
must be tuples with timestamp and a list of values (cannot be a string).
Timestamps should be in POSIX time in seconds.
items:
$ref: '#/components/schemas/DistributionPoint'
type: array
tags:
description: A list of tags associated with the distribution point metric.
example:
- environment:test
items:
description: Individual tags.
type: string
type: array
type:
$ref: '#/components/schemas/DistributionPointsType'
required:
- metric
- points
type: object
DistributionPointsType:
default: distribution
description: The type of the distribution point.
enum:
- distribution
example: distribution
type: string
x-enum-varnames:
- DISTRIBUTION
DistributionWidgetDefinition:
description: "The Distribution visualization is another way of showing metrics\naggregated
across one or several tags, such as hosts.\nUnlike the heat map, a distribution
@@ -18016,6 +18106,74 @@ paths:
x-menu-order: 6
x-undo:
type: idempotent
/api/v1/distribution_points:
post:
description: "The distribution points end-point allows you to post distribution
data that can be graphed on Datadog\u2019s dashboards."
operationId: SubmitDistributionPoints
parameters:
- description: HTTP header used to compress the media-type.
in: header
name: Content-Encoding
required: false
schema:
$ref: '#/components/schemas/DistributionPointsContentEncoding'
requestBody:
content:
text/json:
examples:
dynamic-points:
description: "Post time-series data that can be graphed on Datadog\u2019s
dashboards."
externalValue: examples/metrics/distribution-points.json.sh
summary: Dynamic Points
x-variables:
NOW: $(date +%s)
schema:
$ref: '#/components/schemas/DistributionPointsPayload'
required: true
responses:
'202':
content:
text/json:
schema:
$ref: '#/components/schemas/IntakePayloadAccepted'
description: Payload accepted
'400':
content:
text/json:
schema:
$ref: '#/components/schemas/APIErrorResponse'
description: Bad Request
'403':
content:
text/json:
schema:
$ref: '#/components/schemas/APIErrorResponse'
description: Authentication error
'408':
content:
text/json:
schema:
$ref: '#/components/schemas/APIErrorResponse'
description: Request timeout
'413':
content:
text/json:
schema:
$ref: '#/components/schemas/APIErrorResponse'
description: Payload too large
'429':
$ref: '#/components/responses/TooManyRequestsResponse'
security:
- apiKeyAuth: []
summary: Submit distribution points
tags:
- Metrics
x-codegen-request-body-name: body
x-menu-order: 1
x-undo:
type: safe
/api/v1/downtime:
get:
description: Get all scheduled downtimes.
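
The new SubmitDistributionPoints operation above accepts a DistributionPointsPayload whose series entries carry points as two-element tuples of [timestamp, [values]], with timestamps in POSIX seconds. A minimal sketch of posting that payload directly over HTTP (not the generated client): it assumes the requests library is installed, that apiKeyAuth maps to the usual DD-API-KEY header, and the metric values are placeholders.

import json
import os
import time

import requests  # third-party HTTP client, assumed to be installed

# Payload shaped like the DistributionPointsPayload schema above:
# each point is a two-element tuple of [timestamp, [list of values]].
payload = {
    "series": [
        {
            "metric": "system.load.1",
            "points": [[int(time.time()), [1.0, 2.0]]],
            "tags": ["environment:test"],
            "type": "distribution",
        }
    ]
}

resp = requests.post(
    "https://api.datadoghq.com/api/v1/distribution_points",
    headers={
        "DD-API-KEY": os.environ["DD_API_KEY"],  # assumed auth header name for apiKeyAuth
        "Content-Type": "text/json",  # matches the request body media type in the spec
    },
    data=json.dumps(payload),
    timeout=10,
)
resp.raise_for_status()  # the endpoint answers 202 "Payload accepted" on success
print(resp.json())
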
58 changes: 54 additions & 4 deletions .generator/src/generator/formatter.py
@@ -421,7 +421,7 @@ def open_file(x):
if (not required or schema.get("nullable")) and schema.get("type") is not None:
return reference_to_value(schema, parameters, print_nullable=not in_list)

if "oneOf" in schema and name:
if "oneOf" in schema:
matched = 0
one_of_schema = None
for sub_schema in schema["oneOf"]:
@@ -456,7 +456,10 @@ def open_file(x):
if not one_of_schema_name:
one_of_schema_name = simple_type(one_of_schema).title()
reference = "" if required or nullable else "&"
return f"{reference}{name_prefix}{name}{{\n{one_of_schema_name}: {parameters}}}"
if name:
return f"{reference}{name_prefix}{name}{{\n{one_of_schema_name}: {parameters}}}"
else:
return f"{{{one_of_schema_name}: {reference}{parameters}}}"

return parameters

@@ -475,17 +478,62 @@ def format_data_with_schema_list(
if not schema:
return ""

if "oneOf" in schema:
parameters = ""
matched = 0
one_of_schema = None
for sub_schema in schema["oneOf"]:
try:
if sub_schema.get("nullable") and data is None:
formatted = "nil"
else:
sub_schema["nullable"] = False
formatted = format_data_with_schema(
data,
sub_schema,
name_prefix=name_prefix,
replace_values=replace_values,
**kwargs,
)
if matched == 0:
one_of_schema = sub_schema
parameters = formatted
matched += 1
except (KeyError, ValueError) as e:
print(f"{e}")

if matched == 0:
raise ValueError(f"[{matched}] {data} is not valid for schema {name}")
elif matched > 1:
warnings.warn(f"[{matched}] {data} is not valid for schema {name}")

one_of_schema_name = simple_type(one_of_schema) or f"{schema_name(one_of_schema)}"
reference = "" if one_of_schema.get("required", False) else "&"
return f"{{{one_of_schema_name}: {reference}{parameters}}}"

parameters = ""
# collect nested array types until you find a non-array type
schema_parts = [(required, "[]")]
list_schema = schema["items"]
depth = 1
while list_schema.get("type") == "array":
schema_parts.append((not list_schema.get("nullable", False), "[]"))
list_schema = list_schema["items"]
depth += 1

nested_prefix = list_schema.get("nullable", False) and "*" or ""
nested_schema_name = schema_name(list_schema)
nested_schema_name = f"{name_prefix}{nested_schema_name}" if nested_schema_name else "interface{}"
if "oneOf" in list_schema:
if schema_name(list_schema):
nested_schema_name = f"{name_prefix}{schema_name(list_schema)}"
elif schema_name(schema['items']):
nested_schema_name = f"{name_prefix}{schema_name(schema['items'])}Item"
else:
nested_schema_name = "interface{}"
else:
nested_schema_name = schema_name(list_schema)
nested_schema_name = f"{name_prefix}{nested_schema_name}" if nested_schema_name else "interface{}"

nested_type = simple_type(list_schema)
schema_parts.append(
(
@@ -509,7 +557,9 @@
parameters += f"{value},\n"

if in_list:
return f"{{\n{parameters}}}"
for _ in range(depth):
parameters = f"{{\n{parameters}}}"
return parameters

return f"{nested_simple_type_name}{{\n{parameters}}}"

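
The formatter change above does two things for generated example code: it now handles oneOf schemas that appear as list items (so an array whose items are a oneOf, like DistributionPoint above, can be rendered inside a points array), and it tracks how deeply array schemas are nested so that in-list values are wrapped in one brace pair per level rather than exactly one. A minimal, self-contained sketch of that depth-wrapping loop; the helper name is hypothetical and only mirrors the final for-loop, not the full formatter:

def wrap_in_braces(rendered: str, depth: int) -> str:
    """Wrap an already-rendered list body in one brace pair per nesting level.

    Mirrors the loop added at the end of format_data_with_schema_list: a value
    sitting inside a doubly nested array ends up inside two brace pairs.
    """
    for _ in range(depth):
        rendered = "{\n" + rendered + "}"
    return rendered


# With depth=2 the rendered body is wrapped in two brace pairs,
# matching a doubly nested array literal.
print(wrap_in_braces("0.5,\n1.0,\n", 2))
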
(diffs for the remaining changed files are not shown)
