# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing long running jobs."""
__author__ = 'Sean Lip'
import ast
import copy
import datetime
import logging
import traceback
import utils
from core.platform import models
(base_models, job_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.job])
taskqueue_services = models.Registry.import_taskqueue_services()
transaction_services = models.Registry.import_transaction_services()
from google.appengine.ext import ndb
from mapreduce import base_handler
from mapreduce import context
from mapreduce import input_readers
from mapreduce import mapreduce_pipeline
from mapreduce import model as mapreduce_model
from mapreduce.lib.pipeline import pipeline
from mapreduce import util as mapreduce_util
MAPPER_PARAM_KEY_ENTITY_KINDS = 'entity_kinds'
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS = 'queued_time_msecs'
# Name of an additional parameter to pass into the MR job for cleaning up
# old auxiliary job models.
MAPPER_PARAM_MAX_START_TIME_MSEC = 'max_start_time_msec'
STATUS_CODE_NEW = job_models.STATUS_CODE_NEW
STATUS_CODE_QUEUED = job_models.STATUS_CODE_QUEUED
STATUS_CODE_STARTED = job_models.STATUS_CODE_STARTED
STATUS_CODE_COMPLETED = job_models.STATUS_CODE_COMPLETED
STATUS_CODE_FAILED = job_models.STATUS_CODE_FAILED
STATUS_CODE_CANCELED = job_models.STATUS_CODE_CANCELED
VALID_STATUS_CODE_TRANSITIONS = {
STATUS_CODE_NEW: [STATUS_CODE_QUEUED],
STATUS_CODE_QUEUED: [STATUS_CODE_STARTED, STATUS_CODE_CANCELED],
STATUS_CODE_STARTED: [
STATUS_CODE_COMPLETED, STATUS_CODE_FAILED, STATUS_CODE_CANCELED],
STATUS_CODE_COMPLETED: [],
STATUS_CODE_FAILED: [],
STATUS_CODE_CANCELED: [],
}
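# For example, a successful run walks NEW -> QUEUED -> STARTED -> COMPLETED;
# cancellation is only possible from the QUEUED or STARTED states.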
# The default amount of time that defines a 'recent' job. Jobs that were
# queued more recently than this number of milliseconds ago are considered
# 'recent'.
DEFAULT_RECENCY_MSEC = 14 * 24 * 60 * 60 * 1000
# The maximum number of previously-run jobs to show in the admin dashboard.
NUM_JOBS_IN_DASHBOARD_LIMIT = 100
class BaseJobManager(object):
"""Base class for managing long-running jobs.
These jobs are not transaction-safe, and multiple jobs of the same kind
may run at once and overlap. Individual jobs should account for this. In
particular, if a job writes to some location, no other enqueued or running
job should be writing to, or reading from, that location.
This is expected to be the case for one-off migration jobs, as well as
batch reporting jobs. One-off migration jobs are expected to be transient
and will not be a permanent part of the codebase. Batch reporting jobs are
expected to write to a particular datastore model that is optimized for
fast querying; each batch reporting job should correspond to exactly one of
these models. The reporting jobs are expected to be run as MapReduces; to
find existing ones, search for subclasses of BaseMapReduceJobManager.
Note that the enqueue(), register_start(), register_completion(),
register_failure() and cancel() methods in this class batch the following
operations: (a) pre- and post-hooks, (b) updating the status of the job in
    the datastore, and (c) actually performing the operation. The batch as
    a whole is not run in a transaction, but subclasses can still perform
    (a) or (c) transactionally if they wish to.
"""
@classmethod
def _is_abstract(cls):
return cls in ABSTRACT_BASE_CLASSES
@classmethod
def create_new(cls):
"""Creates a new job of this class type. Returns the id of this job."""
if cls._is_abstract():
raise Exception(
'Tried to directly create a job using the abstract base '
'manager class %s, which is not allowed.' % cls.__name__)
def _create_new_job():
job_id = job_models.JobModel.get_new_id(cls.__name__)
job_models.JobModel(id=job_id, job_type=cls.__name__).put()
return job_id
return transaction_services.run_in_transaction(_create_new_job)
@classmethod
def enqueue(cls, job_id, additional_job_params=None):
"""Marks a job as queued and adds it to a queue for processing."""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_QUEUED)
cls._require_correct_job_type(model.job_type)
# Enqueue the job.
cls._pre_enqueue_hook(job_id)
cls._real_enqueue(job_id, additional_job_params)
model.status_code = STATUS_CODE_QUEUED
model.time_queued_msec = utils.get_current_time_in_millisecs()
model.put()
cls._post_enqueue_hook(job_id)
@classmethod
def register_start(cls, job_id, metadata=None):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_STARTED)
cls._require_correct_job_type(model.job_type)
cls._pre_start_hook(job_id)
model.metadata = metadata
model.status_code = STATUS_CODE_STARTED
model.time_started_msec = utils.get_current_time_in_millisecs()
model.put()
cls._post_start_hook(job_id)
@classmethod
def register_completion(cls, job_id, output):
"""Marks a job as completed."""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_COMPLETED)
cls._require_correct_job_type(model.job_type)
model.status_code = STATUS_CODE_COMPLETED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.output = output
model.put()
cls._post_completed_hook(job_id)
@classmethod
def register_failure(cls, job_id, error):
"""Marks a job as failed."""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_FAILED)
cls._require_correct_job_type(model.job_type)
model.status_code = STATUS_CODE_FAILED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.error = error
model.put()
cls._post_failure_hook(job_id)
@classmethod
def cancel(cls, job_id, user_id):
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_CANCELED)
cls._require_correct_job_type(model.job_type)
cancel_message = 'Canceled by %s' % (user_id or 'system')
# Cancel the job.
cls._pre_cancel_hook(job_id, cancel_message)
model.status_code = STATUS_CODE_CANCELED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.error = cancel_message
model.put()
cls._post_cancel_hook(job_id, cancel_message)
@classmethod
def is_active(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code in [STATUS_CODE_QUEUED, STATUS_CODE_STARTED]
@classmethod
def has_finished(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code in [STATUS_CODE_COMPLETED, STATUS_CODE_FAILED]
@classmethod
def cancel_all_unfinished_jobs(cls, user_id):
"""Cancel all queued or started jobs of this job type."""
unfinished_job_models = job_models.JobModel.get_unfinished_jobs(
cls.__name__)
for model in unfinished_job_models:
cls.cancel(model.id, user_id)
@classmethod
def _real_enqueue(cls, job_id, additional_job_params):
"""Does the actual work of enqueueing a job for deferred execution.
Must be implemented by subclasses.
"""
raise NotImplementedError(
'Subclasses of BaseJobManager should implement _real_enqueue().')
@classmethod
def get_status_code(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code
@classmethod
def get_time_queued_msec(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_queued_msec
@classmethod
def get_time_started_msec(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_started_msec
@classmethod
def get_time_finished_msec(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_finished_msec
@classmethod
def get_metadata(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.metadata
@classmethod
def get_output(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.output
@classmethod
def get_error(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.error
@classmethod
def _require_valid_transition(
cls, job_id, old_status_code, new_status_code):
valid_new_status_codes = VALID_STATUS_CODE_TRANSITIONS[old_status_code]
if new_status_code not in valid_new_status_codes:
raise Exception(
'Invalid status code change for job %s: from %s to %s' %
(job_id, old_status_code, new_status_code))
@classmethod
def _require_correct_job_type(cls, job_type):
if job_type != cls.__name__:
raise Exception(
'Invalid job type %s for class %s' % (job_type, cls.__name__))
@classmethod
def _pre_enqueue_hook(cls, job_id):
pass
@classmethod
def _post_enqueue_hook(cls, job_id):
pass
@classmethod
def _pre_start_hook(cls, job_id):
pass
@classmethod
def _post_start_hook(cls, job_id):
pass
@classmethod
def _post_completed_hook(cls, job_id):
pass
@classmethod
def _post_failure_hook(cls, job_id):
pass
@classmethod
def _pre_cancel_hook(cls, job_id, cancel_message):
pass
@classmethod
def _post_cancel_hook(cls, job_id, cancel_message):
pass
class BaseDeferredJobManager(BaseJobManager):
@classmethod
def _run(cls):
"""Function that performs the main business logic of the job.
Needs to be implemented by subclasses.
"""
raise NotImplementedError
@classmethod
def _run_job(cls, job_id):
"""Starts the job."""
logging.info(
'Job %s started at %s' %
(job_id, utils.get_current_time_in_millisecs()))
cls.register_start(job_id)
try:
result = cls._run()
except Exception as e:
logging.error(traceback.format_exc())
logging.error(
'Job %s failed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
cls.register_failure(
job_id, '%s\n%s' % (unicode(e), traceback.format_exc()))
raise taskqueue_services.PermanentTaskFailure(
'Task failed: %s\n%s' % (unicode(e), traceback.format_exc()))
# Note that the job may have been canceled after it started and before
# it reached this stage. This will result in an exception when the
# validity of the status code transition is checked.
cls.register_completion(job_id, result)
logging.info(
'Job %s completed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
@classmethod
def _real_enqueue(cls, job_id, unused_additional_job_params):
taskqueue_services.defer(cls._run_job, job_id)
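# Illustrative sketch (not part of the production codebase): a minimal
# deferred job. The class name and output below are hypothetical; a real
# job implements _run() with its own business logic.
class SampleDeferredJobManager(BaseDeferredJobManager):
    """Trivial deferred job, as a demonstration of _run()."""

    @classmethod
    def _run(cls):
        # The return value becomes the job's output on successful
        # completion (see _run_job() above).
        return 'sample output'

# A typical client-side lifecycle for such a job would look like:
#   job_id = SampleDeferredJobManager.create_new()
#   SampleDeferredJobManager.enqueue(job_id)
#   ...
#   if SampleDeferredJobManager.has_finished(job_id):
#       output = SampleDeferredJobManager.get_output(job_id)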
class MapReduceJobPipeline(base_handler.PipelineBase):
def run(self, job_id, job_class_str, kwargs):
job_class = mapreduce_util.for_name(job_class_str)
job_class.register_start(job_id, metadata={
job_class._OUTPUT_KEY_ROOT_PIPELINE_ID: self.root_pipeline_id
})
# TODO(sll): Need try/except/mark-as-canceled here?
output = yield mapreduce_pipeline.MapreducePipeline(**kwargs)
yield StoreMapReduceResults(job_id, job_class_str, output)
def finalized(self):
# Suppress the default Pipeline behavior of sending email.
# TODO(sll): Should mark-as-done be here instead?
pass
class StoreMapReduceResults(base_handler.PipelineBase):
def run(self, job_id, job_class_str, output):
job_class = mapreduce_util.for_name(job_class_str)
try:
iterator = input_readers.RecordsReader(output, 0)
results_list = []
for item in iterator:
# Map/reduce puts reducer output into blobstore files as a
# string obtained via "str(result)". Use AST as a safe
# alternative to eval() to get the Python object back.
results_list.append(ast.literal_eval(item))
job_class.register_completion(job_id, results_list)
except Exception as e:
logging.error(traceback.format_exc())
logging.error(
'Job %s failed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
job_class.register_failure(
job_id,
'%s\n%s' % (unicode(e), traceback.format_exc()))
class BaseMapReduceJobManager(BaseJobManager):
# The output for this job is a list of individual results. Each item in
# the list will be of whatever type is yielded from the 'reduce' method.
#
    # The 'metadata' field in the JobModel representing a MapReduce job
# is a dict with one key, _OUTPUT_KEY_ROOT_PIPELINE_ID. The corresponding
# value is a string representing the ID of the MapReduceJobPipeline
# as known to the mapreduce/lib/pipeline internals. This is used
# to generate URLs pointing at the pipeline support UI.
_OUTPUT_KEY_ROOT_PIPELINE_ID = 'root_pipeline_id'
@staticmethod
def get_mapper_param(param_name):
mapper_params = context.get().mapreduce_spec.mapper.params
if param_name not in mapper_params:
raise Exception(
'Could not find %s in %s' % (param_name, mapper_params))
return context.get().mapreduce_spec.mapper.params[param_name]
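    # For example, a job enqueued with
    # additional_job_params={'threshold': '10'} (merged into the mapper
    # params in _real_enqueue() below) could read the value back inside
    # map() via:
    #   threshold = int(
    #       BaseMapReduceJobManager.get_mapper_param('threshold'))
    # Note that all mapper params must be strings; 'threshold' here is a
    # hypothetical name.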
@classmethod
def entity_classes_to_map_over(cls):
"""Return a list of reference to the datastore classes to map over."""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement '
'entity_classes_to_map_over()')
@staticmethod
def map(item):
"""Implements the map function. Must be declared @staticmethod.
Args:
item: The parameter passed to this function is a single element of
the type given by entity_class(). This function may yield as many
times as appropriate (including zero) to return key/value 2-tuples.
For example, to get a count of all explorations, one might yield
(exploration.id, 1).
WARNING: The OutputWriter converts mapper output keys to type str.
So, if you have keys that are of type unicode, you must yield
"key.encode('utf-8')", rather than "key".
"""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement map '
'as a @staticmethod.')
@staticmethod
def reduce(key, values):
"""Implements the reduce function. Must be declared @staticmethod.
This function should yield whatever it likes; the recommended thing to
do is emit entities. All emitted outputs from all reducers will be
collected in an array and set into the output value for the job, so
don't pick anything huge. If you need something huge, persist it out
into the datastore instead and return a reference (and dereference it
later to load content as needed).
Args:
key: A key value as emitted from the map() function, above.
values: A list of all values from all mappers that were tagged with
the given key. This code can assume that it is the only process
handling values for this key. (It can probably also assume that
it will be called exactly once for each key with all of the output,
but this needs to be verified.)
"""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement '
'reduce as a @staticmethod.')
@classmethod
def _real_enqueue(cls, job_id, additional_job_params):
entity_class_types = cls.entity_classes_to_map_over()
entity_class_names = [
'%s.%s' % (
entity_class_type.__module__, entity_class_type.__name__)
for entity_class_type in entity_class_types]
kwargs = {
'job_name': job_id,
'mapper_spec': '%s.%s.map' % (cls.__module__, cls.__name__),
'reducer_spec': '%s.%s.reduce' % (cls.__module__, cls.__name__),
'input_reader_spec': (
'core.jobs.MultipleDatastoreEntitiesInputReader'),
'output_writer_spec': (
'mapreduce.output_writers.BlobstoreRecordsOutputWriter'),
'mapper_params': {
MAPPER_PARAM_KEY_ENTITY_KINDS: entity_class_names,
# Note that all parameters passed to the mapper need to be
# strings. Also note that the value for this key is determined
# just before enqueue time, so it will be roughly equal to the
# actual enqueue time.
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS: str(
utils.get_current_time_in_millisecs()),
}
}
if additional_job_params is not None:
for param_name in additional_job_params:
if param_name in kwargs['mapper_params']:
raise Exception(
'Additional job param %s shadows an existing mapper '
'param' % param_name)
kwargs['mapper_params'][param_name] = copy.deepcopy(
additional_job_params[param_name])
mr_pipeline = MapReduceJobPipeline(
job_id, '%s.%s' % (cls.__module__, cls.__name__), kwargs)
mr_pipeline.start(base_path='/mapreduce/worker/pipeline')
@classmethod
def _pre_cancel_hook(cls, job_id, cancel_message):
metadata = cls.get_metadata(job_id)
root_pipeline_id = metadata[cls._OUTPUT_KEY_ROOT_PIPELINE_ID]
pipeline.Pipeline.from_id(root_pipeline_id).abort(cancel_message)
@staticmethod
def _entity_created_before_job_queued(entity):
"""Checks that the given entity was created before the MR job was queued.
Mapper methods may want to use this as a precomputation check,
especially if the datastore classes being iterated over are append-only
event logs.
"""
created_on_msec = utils.get_time_in_millisecs(entity.created_on)
job_queued_msec = float(context.get().mapreduce_spec.mapper.params[
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS])
return job_queued_msec >= created_on_msec
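# Illustrative sketch (not part of the production codebase): a minimal
# MapReduce job that counts JobModel entities per status code. JobModel is
# used here only because it is already imported above; any datastore model
# class would do.
class SampleJobStatusCounterJobManager(BaseMapReduceJobManager):
    """Counts jobs per status code, as a demonstration of map()/reduce()."""

    @classmethod
    def entity_classes_to_map_over(cls):
        return [job_models.JobModel]

    @staticmethod
    def map(item):
        # Ignore entities created after this job was enqueued; see
        # _entity_created_before_job_queued() above.
        if BaseMapReduceJobManager._entity_created_before_job_queued(item):
            yield (item.status_code, 1)

    @staticmethod
    def reduce(key, values):
        # 'values' arrives as a list of str()-converted mapper outputs, so
        # its length (rather than its sum) gives the count for this key.
        yield (key, len(values))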
class MultipleDatastoreEntitiesInputReader(input_readers.InputReader):
_ENTITY_KINDS_PARAM = 'entity_kinds'
_READER_LIST_PARAM = 'readers'
def __init__(self, reader_list):
self._reader_list = reader_list
def __iter__(self):
for reader in self._reader_list:
yield reader
@classmethod
def from_json(cls, input_shard_state):
return cls(input_readers.DatastoreInputReader.from_json(
input_shard_state[cls._READER_LIST_PARAM]))
def to_json(self):
return {
self._READER_LIST_PARAM: self._reader_list.to_json()
}
@classmethod
def split_input(cls, mapper_spec):
params = mapper_spec.params
entity_kinds = params.get(cls._ENTITY_KINDS_PARAM)
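        # One set of DatastoreInputReader splits is generated per entity
        # kind below; the nested sets are then flattened into a single
        # list of readers.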
splits = []
for entity_kind in entity_kinds:
new_mapper_spec = copy.deepcopy(mapper_spec)
new_mapper_spec.params['entity_kind'] = entity_kind
splits.append(
input_readers.DatastoreInputReader.split_input(
new_mapper_spec))
inputs = []
for split in splits:
for item in split:
inputs.append(MultipleDatastoreEntitiesInputReader(item))
return inputs
@classmethod
def validate(cls, mapper_spec):
return True # TODO
class BaseMapReduceJobManagerForContinuousComputations(BaseMapReduceJobManager):
@classmethod
def _get_continuous_computation_class(cls):
"""Returns the ContinuousComputationManager class associated with this
MapReduce job.
"""
raise NotImplementedError(
'Subclasses of BaseMapReduceJobManagerForContinuousComputations '
'must implement the _get_continuous_computation_class() method.')
@staticmethod
def _get_job_queued_msec():
return float(context.get().mapreduce_spec.mapper.params[
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS])
@staticmethod
def _entity_created_before_job_queued(entity):
"""Checks that the given entity was created before the MR job was queued.
Mapper methods may want to use this as a precomputation check,
especially if the datastore classes being iterated over are append-only
event logs.
"""
created_on_msec = utils.get_time_in_millisecs(entity.created_on)
job_queued_msec = float(context.get().mapreduce_spec.mapper.params[
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS])
return job_queued_msec >= created_on_msec
@classmethod
def _post_completed_hook(cls, job_id):
cls._get_continuous_computation_class().on_batch_job_completion()
@classmethod
def _post_cancel_hook(cls, job_id, cancel_message):
cls._get_continuous_computation_class().on_batch_job_canceled()
@classmethod
def _post_failure_hook(cls, job_id):
cls._get_continuous_computation_class().on_batch_job_failure()
class BaseRealtimeDatastoreClassForContinuousComputations(
base_models.BaseModel):
"""Storage class for entities in the realtime layer.
Instances of this class represent individual entities that are stored in
the realtime datastore. Note that the realtime datastore may be formatted
differently from the datastores that are iterated over by the MapReduce
job.
The IDs for instances of this class are of the form 0:... or 1:..., where
the 0 or 1 indicates the realtime layer that the entity is in.
    NOTE TO DEVELOPERS: Ensure that you wrap the id with get_realtime_id()
    when doing creations, gets, puts and queries, so that the relevant
    layer prefix gets prepended.
"""
realtime_layer = ndb.IntegerProperty(required=True, choices=[0, 1])
@classmethod
def get_realtime_id(cls, layer_index, raw_entity_id):
"""Returns an ID used to identify the element with the given entity id
in the currently active realtime datastore layer.
"""
return '%s:%s' % (layer_index, raw_entity_id)
@classmethod
def delete_layer(cls, layer_index, latest_created_on_datetime):
"""Deletes all entities in the given layer which were created before
the given datetime.
"""
query = cls.query().filter(cls.realtime_layer == layer_index).filter(
cls.created_on < latest_created_on_datetime)
ndb.delete_multi(query.iter(keys_only=True))
@classmethod
def _is_valid_realtime_id(cls, realtime_id):
return realtime_id.startswith('0:') or realtime_id.startswith('1:')
@classmethod
def get(cls, entity_id, strict=True):
if not cls._is_valid_realtime_id(entity_id):
raise ValueError('Invalid realtime id: %s' % entity_id)
return super(
BaseRealtimeDatastoreClassForContinuousComputations, cls
).get(entity_id, strict=strict)
def put(self):
if (self.realtime_layer is None or
str(self.realtime_layer) != self.id[0]):
raise Exception(
'Realtime layer %s does not match realtime id %s' %
(self.realtime_layer, self.id))
return super(
BaseRealtimeDatastoreClassForContinuousComputations, self).put()
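# Illustrative sketch (not part of the production codebase): a realtime
# model with a single counter field, in the spirit of
# StartExplorationRealtimeModel in core/jobs_test.py. The 'count' field is
# hypothetical.
class SampleRealtimeCounterModel(
        BaseRealtimeDatastoreClassForContinuousComputations):
    count = ndb.IntegerProperty(default=0)

# Entities of such a class must always be addressed by layer-prefixed ids:
#   entity_id = SampleRealtimeCounterModel.get_realtime_id(0, 'some_exp_id')
#   SampleRealtimeCounterModel(
#       id=entity_id, realtime_layer=0, count=1).put()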
class BaseContinuousComputationManager(object):
"""This class represents a manager for a continuously-running computation.
Such computations consist of two parts: a batch job to compute summary
views, and a realtime layer to augment these batch views with additional
data that has come in since the last batch job results were computed. The
realtime layer may provide only approximate results, but the discrepancy
should be small because the realtime layer is expected to handle a much
smaller amount of data than the batch layer.
The batch jobs are run continuously, with each batch job starting
immediately after the previous run has finished. There are two realtime
    layers that are cleared alternately after successive batch runs, just
before a new batch job is enqueued. Events are recorded to all three
layers.
Here is a schematic showing how this works. The x-axis represents the
progression of time. The arrowed intervals in the batch layer indicate how
long the corresponding batch job takes to run, and the intervals in each
realtime layer indicate spans between when the data in the realtime layer
is cleared. Note that a realtime layer is cleared as part of the post-
processing that happens when a batch job completes, which explains why the
termination of each batch interval and one of the realtime intervals
always coincides. Having two realtime layers allows the inactive layer to
be cleared whilst not affecting incoming queries to the active layer.
Batch layer <-----> <-------> <-------> <-------> <-------->
Realtime layer R0 <-----> <------------------> <------------------>
Realtime layer R1 <----------------> <-----------------> <------ ...
<-- A --> <-- B -->
For example, queries arising during the time interval A will use the
results of the first batch run, plus data from the realtime layer R1.
Queries arising during the time interval B will use the results of the
second batch run, plus data from the realtime layer R0.
"""
# TODO(sll): In the previous docstring, quantify what 'small' means
# once we have some experience with this running in production.
@classmethod
def get_event_types_listened_to(cls):
"""Returns a list of event types that this class subscribes to."""
raise NotImplementedError(
'Subclasses of BaseContinuousComputationManager must implement '
'get_event_types_listened_to(). This method should return a list '
'of strings, each representing an event type that this class '
'subscribes to.')
@classmethod
def _get_realtime_datastore_class(cls):
"""Returns the datastore class used by the realtime layer, which should
subclass BaseRealtimeDatastoreClassForContinuousComputations. See
StartExplorationRealtimeModel in core/jobs_test.py for an example
of how to do this.
"""
raise NotImplementedError(
'Subclasses of BaseContinuousComputationManager must implement '
'_get_realtime_datastore_class(). This method should return '
'the datastore class to be used by the realtime layer.')
@classmethod
def _get_batch_job_manager_class(cls):
"""Returns the manager class for the continuously-running batch job.
See jobs_test.py for an example of how to do this.
"""
raise NotImplementedError(
'Subclasses of BaseContinuousComputationManager must implement '
            '_get_batch_job_manager_class(). This method should return the '
'manager class for the continuously-running batch job.')
@classmethod
def _handle_incoming_event(
cls, active_realtime_layer, event_type, *args, **kwargs):
"""Records incoming events in the given realtime layer.
This method should be implemented by subclasses. The args are the
same as those sent to the event handler corresponding to the event
type. Note that there may be more than one event type.
IMPORTANT: This method only gets called as part of the dequeue process
from a deferred task queue. Developers should expect a delay to occur
between when the incoming event arrives and when this method is called,
and should resolve any arguments that depend on local session
variables (such as the user currently in session) before enqueueing
this method in the corresponding event handler.
IMPORTANT: If an exception is raised here, the task queue will retry
calling it and any mutations made will be redone -- unless the
exception has type taskqueue_services.PermanentTaskFailure. Developers
should therefore ensure that _handle_incoming_event() is robust to
multiple calls for the same incoming event.
"""
raise NotImplementedError(
'Subclasses of BaseContinuousComputationManager must implement '
'_handle_incoming_event(...). Please check the docstring of this '
'method in jobs.BaseContinuousComputationManager for important '
'developer information.')
@classmethod
def _get_active_realtime_index(cls):
def _get_active_realtime_index_transactional():
cc_model = job_models.ContinuousComputationModel.get(
cls.__name__, strict=False)
if cc_model is None:
cc_model = job_models.ContinuousComputationModel(
id=cls.__name__)
cc_model.put()
return cc_model.active_realtime_layer_index
return transaction_services.run_in_transaction(
_get_active_realtime_index_transactional)
@classmethod
def get_active_realtime_layer_id(cls, entity_id):
"""Returns an ID used to identify the element with the given entity id
in the currently active realtime datastore layer.
"""
return cls._get_realtime_datastore_class().get_realtime_id(
cls._get_active_realtime_index(), entity_id)
@classmethod
def _switch_active_realtime_class(cls):
def _switch_active_realtime_class_transactional():
cc_model = job_models.ContinuousComputationModel.get(
cls.__name__)
cc_model.active_realtime_layer_index = (
1 - cc_model.active_realtime_layer_index)
cc_model.put()
transaction_services.run_in_transaction(
_switch_active_realtime_class_transactional)
@classmethod
def _clear_inactive_realtime_layer(
cls, latest_created_on_datetime):
"""Deletes all entries in the given realtime datastore class whose
created_on date is before latest_timestamp.
"""
inactive_realtime_index = 1 - cls._get_active_realtime_index()
cls._get_realtime_datastore_class().delete_layer(
inactive_realtime_index, latest_created_on_datetime)
@classmethod
def _kickoff_batch_job(cls):
"""Create and enqueue a new batch job."""
if job_models.JobModel.do_unfinished_jobs_exist(cls.__name__):
logging.error(
'Tried to start a new batch job of type %s while an existing '
            'job was still running.' % cls.__name__)
return
job_manager = cls._get_batch_job_manager_class()
job_id = job_manager.create_new()
job_manager.enqueue(job_id)
@classmethod
def _register_end_of_batch_job_and_return_status(cls):
"""Processes a 'job finished' event and returns the job's updated status
code.
Note that 'finish' in this context might mean 'completed successfully'
or 'failed'.
Processing means the following: if the job is currently 'stopping', its
status is set to 'idle'; otherwise, its status remains as 'running'.
"""
def _register_end_of_batch_job_transactional():
"""Transactionally change the computation's status when a batch job
ends."""
cc_model = job_models.ContinuousComputationModel.get(cls.__name__)
if (cc_model.status_code ==
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_STOPPING):
cc_model.status_code = (
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
cc_model.put()
return cc_model.status_code
return transaction_services.run_in_transaction(
_register_end_of_batch_job_transactional)
@classmethod
def get_status_code(cls):
"""Returns the status code of the job."""
return job_models.ContinuousComputationModel.get(
cls.__name__).status_code
@classmethod
def start_computation(cls):
"""(Re)starts the continuous computation corresponding to this class.
Raises an Exception if the computation is already running.
"""
def _start_computation_transactional():
"""Transactional implementation for marking a continuous
computation as started.
"""
cc_model = job_models.ContinuousComputationModel.get(
cls.__name__, strict=False)
if cc_model is None:
cc_model = job_models.ContinuousComputationModel(
id=cls.__name__)
if (cc_model.status_code !=
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE):
raise Exception(
'Attempted to start computation %s, which is already '
'running.' % cls.__name__)
cc_model.status_code = (
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
cc_model.last_started_msec = utils.get_current_time_in_millisecs()
cc_model.put()
transaction_services.run_in_transaction(
_start_computation_transactional)
cls._clear_inactive_realtime_layer(datetime.datetime.utcnow())
cls._kickoff_batch_job()
@classmethod
def stop_computation(cls, user_id, test_mode=False):
"""Cancels the currently-running batch job.
No further batch runs will be kicked off.
"""
# This is not an ancestor query, so it must be run outside a
# transaction.
do_unfinished_jobs_exist = (
job_models.JobModel.do_unfinished_jobs_exist(
cls._get_batch_job_manager_class().__name__))
def _stop_computation_transactional():
"""Transactional implementation for marking a continuous
computation as stopping/idle.
"""
cc_model = job_models.ContinuousComputationModel.get(cls.__name__)
# If there is no job currently running, go to IDLE immediately.
new_status_code = (
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_STOPPING if
do_unfinished_jobs_exist else
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
cc_model.status_code = new_status_code
cc_model.last_stopped_msec = utils.get_current_time_in_millisecs()
cc_model.put()
transaction_services.run_in_transaction(
_stop_computation_transactional)
# The cancellation must be done after the continuous computation
# status update.
if do_unfinished_jobs_exist:
unfinished_job_models = job_models.JobModel.get_unfinished_jobs(
cls._get_batch_job_manager_class().__name__)
for job_model in unfinished_job_models:
cls._get_batch_job_manager_class().cancel(
job_model.id, user_id)
@classmethod
def on_incoming_event(cls, event_type, *args, **kwargs):
"""Handle an incoming event by recording it in both realtime datastore
layers.
The *args and **kwargs match those passed to the _handle_event() method
of the corresponding EventHandler subclass.
"""
REALTIME_LAYERS = [0, 1]
for layer in REALTIME_LAYERS:
cls._handle_incoming_event(layer, event_type, *args, **kwargs)
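    # A hypothetical event handler might wire into this method roughly as
    # follows (all names here are illustrative only):
    #
    #   class SampleEventHandler(object):
    #       @classmethod
    #       def _handle_event(cls, exploration_id, user_id):
    #           taskqueue_services.defer(
    #               SampleComputationManager.on_incoming_event,
    #               'sample_event_type', exploration_id, user_id)
    #
    # Deferring the call (rather than invoking it inline) produces the
    # delay described in the _handle_incoming_event() docstring above.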
@classmethod
def _process_job_completion_and_return_status(cls):
"""Delete all data in the currently-active realtime_datastore class,
switch the active class, and return the status.
This seam was created so that tests would be able to override
on_batch_job_completion() to avoid kicking off the next job
immediately.
"""
cls._switch_active_realtime_class()
cls._clear_inactive_realtime_layer(datetime.datetime.utcnow())
def _update_last_finished_time_transactional():
cc_model = job_models.ContinuousComputationModel.get(cls.__name__)
cc_model.last_finished_msec = utils.get_current_time_in_millisecs()
cc_model.put()
transaction_services.run_in_transaction(
_update_last_finished_time_transactional)
return cls._register_end_of_batch_job_and_return_status()
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
"""Seam that can be overridden by tests."""
cls._kickoff_batch_job()
@classmethod
def on_batch_job_completion(cls):
"""Called when a batch job completes."""
job_status = cls._process_job_completion_and_return_status()
if job_status == job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING:
cls._kickoff_batch_job_after_previous_one_ends()
@classmethod
def on_batch_job_canceled(cls):
logging.info('Job %s canceled.' % cls.__name__)
# The job should already be stopping, and should therefore be marked
# idle.
job_status = cls._register_end_of_batch_job_and_return_status()
if job_status != job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE:
logging.error(
'Batch job for computation %s canceled but status code not set '
'to idle.' % cls.__name__)
@classmethod
def on_batch_job_failure(cls):
# TODO(sll): Alert the site admin via email.
logging.error('Job %s failed.' % cls.__name__)
job_status = cls._register_end_of_batch_job_and_return_status()