# Copyright 2023-2024 Broadcom
# SPDX-License-Identifier: Apache-2.0
import pathlib
from abc import ABCMeta
from abc import abstractmethod
from typing import Any
from typing import List
from typing import Optional
class IProperties:
"""
Allows a Data Job to store state (key-value pairs) across job runs.
Properties are a solution for the following use cases:
* Keeping application state (for example, the last ingested row timestamp, so you can continue from there on the next run)
* Keeping API keys and passwords necessary to connect to different systems
* Keeping custom configuration of a data job.
Data Job properties are also used for parameter substitution in queries; see the execute_query docstring.
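Example usage (an illustrative sketch, assuming a properties backend is configured for the job;
"last_ingested_ts" is a hypothetical key):
    props = job_input.get_all_properties()
    last_ts = props.get("last_ingested_ts", "1970-01-01T00:00:00Z")
    # ... ingest only rows newer than last_ts and compute new_last_ts ...
    props["last_ingested_ts"] = new_last_ts
    job_input.set_all_properties(props)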
"""
@abstractmethod
def get_property(self, name: str, default_value: Any = None) -> str:
pass
@abstractmethod
def get_all_properties(self) -> dict:
pass
@abstractmethod
def set_all_properties(self, properties: dict):
pass
class ISecrets:
"""
Allows a Data Job to store and retrieve sensitive data.
Secrets are a solution for the following use cases:
* Keeping API keys and passwords necessary to connect to different systems
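Example usage (an illustrative sketch, assuming a secrets backend is configured; "my_api_key" is a hypothetical secret name):
    api_key = job_input.get_secret("my_api_key")
    # use api_key to authenticate against an external system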
"""
@abstractmethod
def get_secret(self, key: str, default_value: Any = None) -> str:
pass
@abstractmethod
def get_all_secrets(self) -> dict:
pass
@abstractmethod
def set_all_secrets(self, secrets: dict):
pass
class IJobArguments:
"""
Allows users to pass arguments to a data job run.
Data Job arguments are also used for parameter substitution in queries; see the execute_query docstring.
"""
@abstractmethod
def get_arguments(self) -> dict:
"""
Returns the arguments passed when running the data job. Arguments are a dictionary of key-value pairs.
The arguments are passed to each step.
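Example usage (an illustrative sketch; assumes the job run was started with arguments {"target_date": "2024-01-01"}):
    args = job_input.get_arguments()
    target_date = args.get("target_date")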
"""
pass
class IManagedConnection:
"""
Takes care of managing raw database connections.
It provides features such as improved error handling, lineage gathering, and smart retries,
and more that can be easily customized and extended by VDK plugins.
"""
@abstractmethod
def execute_query(self, query_as_utf8_string) -> List[List]:
"""
Executes the provided query and returns results from PEP 249 Cursor.fetchall() method, see:
https://www.python.org/dev/peps/pep-0249/#fetchall
The query can contain parameters in the format {query_parameter}. Parameters in the query will
be automatically substituted if they exist as keys in the Data Job properties or arguments.
If a parameter with the same key exists both in arguments and in properties,
arguments (get_arguments()) take precedence over properties (get_property()).
Note:
Parameters are case-sensitive.
If a query parameter is not present in the Data Job properties or arguments, it will not be replaced in the query,
which will most likely result in a query failure.
Example usage:
job_input.set_all_properties({'target_table': 'history.people','etl_run_date_column': 'pa__arrival_ts'})
query_result = job_input.execute_query("SELECT * FROM {target_table} WHERE {etl_run_date_column} > now() - interval 2 hours")
"""
pass
@abstractmethod
def get_managed_connection(self):
"""
Returns the currently initialized PEP 249 connection.
In this case, to execute a query you need to open and close the cursor yourself.
It is generally easier to use the execute_query method above.
Use this method if you need to pass the connection to a third-party library (e.g., SQLAlchemy or pandas).
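Example usage (an illustrative sketch; assumes pandas is installed and listed in the job's requirements,
and that a table history.people exists in the configured database):
    import pandas as pd
    df = pd.read_sql("SELECT * FROM history.people", job_input.get_managed_connection())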
"""
class IIngester:
"""
Methods in this interface are suitable for ingesting new data into remote data stores.
The Ingester client provides easy-to-use, thread-safe APIs to collect and transfer data to any destination.
It offers a simple, best-effort solution for serializing, packaging, and sending data.
The send_*_for_ingestion methods provide a way to send data easily from a database cursor, an API response,
or any other Python iterable collection.
The client can be configured via configuration overrides, or left on default settings.
The methods are asynchronous, and the data is sent in background threads.
The data can be buffered on the client before it is sent,
and you can configure the frequency at which it is sent.
You can also configure the parallelism with which the data is sent, enabling very high throughput.
Upon job completion, the job may block until all buffers are flushed and the data is sent.
If there is an error while sending data, the data job will fail upon completion with a corresponding error.
Ingestion destinations (methods) can be added using plugins
(for example, plugins for sending over HTTP, Kafka, GCloud Pub/Sub, etc.).
"""
@abstractmethod
def send_object_for_ingestion(
self,
payload: dict,
destination_table: Optional[str],
method: Optional[str],
target: Optional[str],
collection_id: Optional[str] = None,
):
"""
Sends a Python object, asynchronously, for ingestion.
The data (payload) is buffered by the VDK Ingester framework and queued for ingestion.
The method then returns immediately, unless the ingester_wait_to_finish_after_every_send flag is set to true.
The data is then processed and sent asynchronously by multiple background threads in parallel
(the level of parallelism can be controlled by the ingester_number_of_worker_threads configuration).
Users should not modify a payload object after it has been passed to a send_object_for_ingestion call,
as this is not thread-safe and can cause inconsistencies in the data.
Arguments:
payload: Union[dict, "pandas.DataFrame"]
The passed object will be translated to a row in the destination table.
Keys of the object are translated to columns in the table, and the values populate a single row.
The payload argument can be either a dict or a pandas.DataFrame.
The pandas library is an optional dependency; ensure it is installed and included in
the job's requirements before deployment.
Note:
This method hides technical complexities around @type and @id
described in the specification; you still have the freedom to
specify @type and @id explicitly.
The object size should be less than 10MB.
Example:
object = {
"address":"1 Main St. Cambridge MA",
"name":"John Doe"
}
Will produce the following DB row:
_____________________________________________
|address |name |... |
|_______________________|______________|____|
|1 Main St. Cambridge MA|John Doe |... |
|_______________________|______________|____|
destination_table: Optional[str]
The name of the table the data should be ingested into.
This parameter does not need to be passed if the table is
included in the payload itself.
method: Optional[str]
Indicates the ingestion method to be used. Example:
method="file" -> ingest to file
method="http" -> ingest using HTTP POST requests
method="kafka" -> ingest to kafka endpoint
This parameter does not need to be passed, as ingestion plugins set
a default value for it. In case multiple ingestion plugins are used,
an `INGEST_METHOD_DEFAULT` environment variable can be set that would
specify which plugin is to be used by default for ingestion.
target: Optional[str]
Identifies where the data should be ingested.
The value for this parameter depends on the ingest method chosen.
For "http" method, it would require an HTTP URL.
Example: http://example.com/<some>/<api>/<endpoint>
For "file" method, it would require a file name or path.
See chosen ingest method (ingestion plugin) documentation for more details on the expected target format.
This parameter does not need to be passed if the
`VDK_INGEST_TARGET_DEFAULT` environment variable is set. This can be
done by plugins, which may set a default value, or it can be
overridden by users.
collection_id: Optional[str]
Optional. An identifier to indicate that data from different method
invocations belong to the same collection. Defaults to
"data_job_name|OpID", meaning all method invocations from a data job
run will belong to the same collection.
Sample usage:
response = requests.get("https://jsonplaceholder.typicode.com/users/1") # call some REST API
job_input.send_object_for_ingestion(response.json(),
"my_destination_table",
method="file"
target="some-target")
"""
pass
@abstractmethod
def send_tabular_data_for_ingestion(
self,
rows: iter,
column_names: list,
destination_table: Optional[str],
method: Optional[str],
target: Optional[str],
collection_id: Optional[str] = None,
):
"""
Sends tabular data, asynchronously, for ingestion.
The data (rows) is buffered by the VDK Ingester framework and queued for ingestion.
The method then returns immediately, unless the ingester_wait_to_finish_after_every_send flag is set to true.
The data is then processed and sent asynchronously by multiple background threads in parallel
(the level of parallelism can be controlled by the ingester_number_of_worker_threads configuration).
Arguments:
rows: a PEP 249 Cursor object or an iterable two-dimensional structure.
A representation of a two-dimensional array that allows iteration over rows.
Can be a list of lists, an iterator that returns the next row
("row" = list or tuple of values),
a PEP 249 cursor object with a successfully executed SELECT statement,
etc. E.g.:
[
[row0column0, row0column1],
[row1column0, row1column1]
]
column_names: list
The column names of the data, in the same order as the values
provided in the rows parameter:
column_names[0] corresponds to row0column0 and row1column0;
column_names[1] corresponds to row0column1 and row1column1.
destination_table: Optional[str]
The name of the table the data should be ingested into.
This parameter does not need to be passed if the table is
included in the payload itself.
method: Optional[str]
Indicates the ingestion method to be used. Example:
method="file" -> ingest to file
method="http" -> ingest using HTTP POST requests
method="kafka" -> ingest to kafka endpoint
This parameter does not need to be passed. In case multiple ingestion plugins are used,
an `VDK_INGEST_METHOD_DEFAULT` environment variable can be set that would
specify which plugin is to be used by default for ingestion.
Different methods can be added through Plugins.
See plugin_input for how to develop a new plugin with new ingest method.
target: Optional[str]
Identifies where the data should be ingested.
The value for this parameter depends on the ingest method chosen.
For "http" method, it would require an HTTP URL.
Example: http://example.com/<some>/<api>/<endpoint>
For "file" method, it would require a file name or path.
See chosen ingest method (ingestion plugin) documentation for more details on the expected target format.
This parameter does not need to be passed. If the
`VDK_INGEST_TARGET_DEFAULT` environment variable is set, it will be used.
If not, plugins may set a default value.
collection_id: Optional[str]
Optional. An identifier to indicate that data from different method
invocations belong to the same collection. Defaults to
"data_job_name|OpID", meaning all method invocations from a data job
run will belong to the same collection.
Sample usage:
tabular_data = [[ "row1column1", 11], ["row2column1", 21]]
job_input.send_tabular_data_for_ingestion(tabular_data,
column_names=["col1", "col2"],
destination_table="my_destination_table",
method="file",
target="my-target")
db_connection = initialize_db_connection()
cursor = db_connection.cursor()
cursor.execute("SELECT * FROM table")
job_input.send_tabular_data_for_ingestion(cursor,
[column_info[0] for column_info in cursor.description],
"my_destination_table",
method="file",
target="my-target")
cursor.close()
"""
pass
class ITemplate:
"""
The Templates interface enables packaging a whole data job and executing it as a single operation.
"""
from vdk.internal.builtin_plugins.run.execution_results import ExecutionResult
@abstractmethod
def execute_template(
self, template_name: str, template_args: dict, database: str = "default"
) -> ExecutionResult:
"""
Execute a data job template.
Templates are pieces of reusable code, common to the use cases of different customers of VDK.
Templates are executed in the context of a Data Job.
They provide an easy solution to common tasks such as loading data to a database or ingesting data in a common way.
For example, there are templates for:
* Slowly Changing Dimension Type 1 strategy: overwrites the data in the target table with the data defined in the source
* Slowly Changing Dimension Type 2 strategy: accumulates updates from the data source as versioned records in the target table
There can be many types of templates. Any data job can be made into a template, since at its core a template is a reusable data job.
You can see the full list of available templates and instructions on their usage here: TODO
Arguments:
template_name: str
Name of the data loading template
template_args: dict
Arguments to be passed to the template
database: str
Name of the database against which the template will be loaded and executed
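Example usage (an illustrative sketch; the template name and the argument keys are hypothetical and
depend on the templates installed by the database plugin in use):
    job_input.execute_template(
        template_name="scd1",
        template_args={
            "source_view": "vw_people",
            "target_table": "history.people",
        },
    )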
"""
pass
class IJobInput(
IProperties, IManagedConnection, IIngester, ITemplate, IJobArguments, ISecrets
):
@abstractmethod
def get_name(self) -> str:
"""
:return: the name of the currently executing job (can be the base data job or a template job)
"""
@abstractmethod
def get_job_directory(self) -> pathlib.Path:
"""
:return: the code location of the currently executing job (can be the base data job or a template job)
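Example usage (an illustrative sketch; "expected_schema.json" is a hypothetical file shipped with the job):
    import json
    schema_file = job_input.get_job_directory() / "expected_schema.json"
    schema = json.loads(schema_file.read_text())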
"""
@abstractmethod
def get_execution_properties(self) -> dict:
"""
:return: a dictionary with execution-related properties for the current job execution. Example properties:
pa__execution_id: str - Identifier of the execution of this job. (equal to OpID when this job is the first job
from the workflow of jobs that are being executed)
pa__job_start_unixtime: str - the start time of this job in seconds since epoch (an integer number).
pa__job_start_ts_expr: str - an Impala-valid SQL expression that returns a TIMESTAMP representing the job start time.
pa__op_id: str - the Operation ID - identifies the trigger that initiated this job. It is possible to have N jobs
with the same OpID (if Job1 started Job2, then Job1.opId = Job2.opId)
Usage:
job_input.get_execution_properties()['pa__job_start_unixtime'] #returns e.g. 1560179839
Those properties can also be used as arguments in SQL. E.g. a .sql file in your job may contain this SELECT:
SELECT id, {pa__job_start_ts_expr} FROM mytable
"""
pass
@abstractmethod
def skip_remaining_steps(self) -> None:
"""
:return:
Usage:
job_input.skip_remaining_steps()
Signalizes to the VDK runtime that all remaining steps should be skipped
and the current job execution should be terminated with status Success. When this method is called from
within a template, the remaining steps of the template will be skipped, but the data job will that called
the template will continue executing normally.
This is to be used when users want
customizable behaviour of data jobs/templates, where execution could be skipped. E.g. if a data job
depends on processing data from a source which has indicated no new entries since last run, then we can skip
the execution.
"""
@abstractmethod
def get_temporary_write_directory(self) -> pathlib.Path:
"""
:return:
Returns a path pointing to a writable directory for
data job executions in the cloud. This is needed because
different cloud deployments may restrict access to the file
system in a cloud execution. By using this directory, data job users can be
sure that the returned folder allows read/write access.
Files written to this directory are temporary and there is no
guarantee that a file created during a local data job execution will
be present in a subsequent local execution. Therefore precautions
must be taken when developing locally since temporary files created
might not have been deleted by the OS. Files created during
cloud executions are deleted when the data job completes. Users can
assume that the temp directory is empty on subsequent cloud
executions. The default returned folder (non-cloud executions) is
tempfile.gettempdir(), therefore the default temporary directory is
managed by the underlying OS.
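Example usage (an illustrative sketch; the file name is hypothetical):
    staging_file = job_input.get_temporary_write_directory() / "staging_data.csv"
    staging_file.write_text("id,name\n1,John Doe\n")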
"""
pass