#!/usr/bin/env python3
"""Generator of dynamically typed draft stubs for arbitrary modules.
The logic of this script can be split into three steps:
* parsing options and finding sources:
  - use runtime imports by default (to also find C modules)
  - or use mypy's mechanisms, if importing is prohibited
* (optionally) semantically analysing the sources using mypy (as a single set)
* emitting the stubs text:
  - for Python modules: from ASTs using ASTStubGenerator
  - for C modules using runtime introspection and (optionally) Sphinx docs
During the first and third steps some problematic files can be skipped, but any
blocking error during the second step will cause the whole program to stop.
Basic usage:
$ stubgen foo.py bar.py some_directory
=> Generate out/foo.pyi, out/bar.pyi, and stubs for some_directory (recursively).
$ stubgen -m urllib.parse
=> Generate out/urllib/parse.pyi.
$ stubgen -p urllib
=> Generate stubs for whole urllib package (recursively).
For C modules, you can get more precise function signatures by parsing .rst (Sphinx)
documentation for extra information. For this, use the --doc-dir option:
$ stubgen --doc-dir <DIR>/Python-3.4.2/Doc/library -m curses
Note: The generated stubs should be verified manually.
TODO:
- maybe use .rst docs also for Python modules
- maybe export more imported names if there is no __all__ (this affects ssl.SSLError, for example)
- a quick and dirty heuristic would be to turn this on if a module has something like
'from x import y as _y'
- we don't seem to always detect properties ('closed' in 'io', for example)
"""
from __future__ import annotations
import argparse
import keyword
import os
import os.path
import sys
import traceback
from typing import Final, Iterable, Iterator
import mypy.build
import mypy.mixedtraverser
import mypy.parse
import mypy.traverser
import mypy.util
from mypy.build import build
from mypy.errors import CompileError, Errors
from mypy.find_sources import InvalidSourceList, create_source_list
from mypy.modulefinder import (
BuildSource,
FindModuleCache,
ModuleNotFoundReason,
SearchPaths,
default_lib_path,
)
from mypy.moduleinspect import ModuleInspect, is_pyc_only
from mypy.nodes import (
ARG_NAMED,
ARG_POS,
ARG_STAR,
ARG_STAR2,
IS_ABSTRACT,
NOT_ABSTRACT,
AssignmentStmt,
Block,
BytesExpr,
CallExpr,
ClassDef,
ComparisonExpr,
ComplexExpr,
Decorator,
DictExpr,
EllipsisExpr,
Expression,
ExpressionStmt,
FloatExpr,
FuncBase,
FuncDef,
IfStmt,
Import,
ImportAll,
ImportFrom,
IndexExpr,
IntExpr,
ListExpr,
MemberExpr,
MypyFile,
NameExpr,
OpExpr,
OverloadedFuncDef,
SetExpr,
StarExpr,
Statement,
StrExpr,
TempNode,
TupleExpr,
TypeInfo,
UnaryExpr,
Var,
)
from mypy.options import Options as MypyOptions
from mypy.sharedparse import MAGIC_METHODS_POS_ARGS_ONLY
from mypy.stubdoc import ArgSig, FunctionSig
from mypy.stubgenc import InspectionStubGenerator, generate_stub_for_c_module
from mypy.stubutil import (
TYPING_BUILTIN_REPLACEMENTS,
BaseStubGenerator,
CantImport,
ClassInfo,
FunctionContext,
common_dir_prefix,
fail_missing,
find_module_path_and_all_py3,
generate_guarded,
infer_method_arg_types,
infer_method_ret_type,
remove_misplaced_type_comments,
report_missing,
walk_packages,
)
from mypy.traverser import (
all_yield_expressions,
has_return_statement,
has_yield_expression,
has_yield_from_expression,
)
from mypy.types import (
OVERLOAD_NAMES,
TPDICT_NAMES,
TYPED_NAMEDTUPLE_NAMES,
AnyType,
CallableType,
Instance,
TupleType,
Type,
UnboundType,
get_proper_type,
)
from mypy.visitor import NodeVisitor
# Common ways of naming packages that contain vendored modules.
VENDOR_PACKAGES: Final = ["packages", "vendor", "vendored", "_vendor", "_vendored_packages"]
# Avoid some file names that are unnecessary or likely to cause trouble (\n for end of path).
BLACKLIST: Final = [
"/six.py\n", # Likely vendored six; too dynamic for us to handle
"/vendored/", # Vendored packages
"/vendor/", # Vendored packages
"/_vendor/",
"/_vendored_packages/",
]
# These methods are expected to always return a non-trivial value.
METHODS_WITH_RETURN_VALUE: Final = {
"__ne__",
"__eq__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__hash__",
"__iter__",
}
class Options:
"""Represents stubgen options.
This class is mutable to simplify testing.
"""
def __init__(
self,
pyversion: tuple[int, int],
no_import: bool,
inspect: bool,
doc_dir: str,
search_path: list[str],
interpreter: str,
parse_only: bool,
ignore_errors: bool,
include_private: bool,
output_dir: str,
modules: list[str],
packages: list[str],
files: list[str],
verbose: bool,
quiet: bool,
export_less: bool,
include_docstrings: bool,
) -> None:
# See parse_options for descriptions of the flags.
self.pyversion = pyversion
self.no_import = no_import
self.inspect = inspect
self.doc_dir = doc_dir
self.search_path = search_path
self.interpreter = interpreter
self.parse_only = parse_only
self.ignore_errors = ignore_errors
self.include_private = include_private
self.output_dir = output_dir
self.modules = modules
self.packages = packages
self.files = files
self.verbose = verbose
self.quiet = quiet
self.export_less = export_less
self.include_docstrings = include_docstrings
class StubSource:
"""A single source for stub: can be a Python or C module.
A simple extension of BuildSource that also carries the AST and
the value of __all__ detected at runtime.
"""
def __init__(
self, module: str, path: str | None = None, runtime_all: list[str] | None = None
) -> None:
self.source = BuildSource(path, module, None)
self.runtime_all = runtime_all
self.ast: MypyFile | None = None
def __repr__(self) -> str:
return f"StubSource({self.source})"
@property
def module(self) -> str:
return self.source.module
@property
def path(self) -> str | None:
return self.source.path
# What was generated previously in the stub file. We keep track of this state to
# produce nicely formatted output (e.g., an empty line between non-empty classes).
EMPTY: Final = "EMPTY"
FUNC: Final = "FUNC"
CLASS: Final = "CLASS"
EMPTY_CLASS: Final = "EMPTY_CLASS"
VAR: Final = "VAR"
NOT_IN_ALL: Final = "NOT_IN_ALL"
# Indicates that we failed to generate a reasonable output
# for a given node. These should be manually replaced by a user.
ERROR_MARKER: Final = "<ERROR>"
class AliasPrinter(NodeVisitor[str]):
"""Visitor used to collect type aliases _and_ type variable definitions.
Visit r.h.s of the definition to get the string representation of type alias.
"""
def __init__(self, stubgen: ASTStubGenerator) -> None:
self.stubgen = stubgen
super().__init__()
def visit_call_expr(self, node: CallExpr) -> str:
# Call expressions are not usually types, but we also treat `X = TypeVar(...)` as a
# type alias that has to be preserved (even if TypeVar is not the same as an alias)
callee = node.callee.accept(self)
args = []
for name, arg, kind in zip(node.arg_names, node.args, node.arg_kinds):
if kind == ARG_POS:
args.append(arg.accept(self))
elif kind == ARG_STAR:
args.append("*" + arg.accept(self))
elif kind == ARG_STAR2:
args.append("**" + arg.accept(self))
elif kind == ARG_NAMED:
args.append(f"{name}={arg.accept(self)}")
else:
raise ValueError(f"Unknown argument kind {kind} in call")
return f"{callee}({', '.join(args)})"
def _visit_ref_expr(self, node: NameExpr | MemberExpr) -> str:
fullname = self.stubgen.get_fullname(node)
if fullname in TYPING_BUILTIN_REPLACEMENTS:
return self.stubgen.add_name(TYPING_BUILTIN_REPLACEMENTS[fullname], require=False)
qualname = get_qualified_name(node)
self.stubgen.import_tracker.require_name(qualname)
return qualname
def visit_name_expr(self, node: NameExpr) -> str:
return self._visit_ref_expr(node)
def visit_member_expr(self, o: MemberExpr) -> str:
return self._visit_ref_expr(o)
def visit_str_expr(self, node: StrExpr) -> str:
return repr(node.value)
def visit_index_expr(self, node: IndexExpr) -> str:
base_fullname = self.stubgen.get_fullname(node.base)
if base_fullname == "typing.Union":
if isinstance(node.index, TupleExpr):
return " | ".join([item.accept(self) for item in node.index.items])
return node.index.accept(self)
if base_fullname == "typing.Optional":
return f"{node.index.accept(self)} | None"
base = node.base.accept(self)
index = node.index.accept(self)
if len(index) > 2 and index.startswith("(") and index.endswith(")"):
index = index[1:-1]
return f"{base}[{index}]"
def visit_tuple_expr(self, node: TupleExpr) -> str:
return f"({', '.join(n.accept(self) for n in node.items)})"
def visit_list_expr(self, node: ListExpr) -> str:
return f"[{', '.join(n.accept(self) for n in node.items)}]"
def visit_dict_expr(self, o: DictExpr) -> str:
dict_items = []
for key, value in o.items:
# This is currently only used for TypedDict where all keys are strings.
assert isinstance(key, StrExpr)
dict_items.append(f"{key.accept(self)}: {value.accept(self)}")
return f"{{{', '.join(dict_items)}}}"
def visit_ellipsis(self, node: EllipsisExpr) -> str:
return "..."
def visit_op_expr(self, o: OpExpr) -> str:
return f"{o.left.accept(self)} {o.op} {o.right.accept(self)}"
def visit_star_expr(self, o: StarExpr) -> str:
return f"*{o.expr.accept(self)}"
def find_defined_names(file: MypyFile) -> set[str]:
finder = DefinitionFinder()
file.accept(finder)
return finder.names
def get_assigned_names(lvalues: Iterable[Expression]) -> Iterator[str]:
for lvalue in lvalues:
if isinstance(lvalue, NameExpr):
yield lvalue.name
elif isinstance(lvalue, TupleExpr):
yield from get_assigned_names(lvalue.items)
class DefinitionFinder(mypy.traverser.TraverserVisitor):
"""Find names of things defined at the top level of a module."""
def __init__(self) -> None:
# Short names of things defined at the top level.
self.names: set[str] = set()
def visit_class_def(self, o: ClassDef) -> None:
# Don't recurse into classes, as we only keep track of top-level definitions.
self.names.add(o.name)
def visit_func_def(self, o: FuncDef) -> None:
# Don't recurse, as we only keep track of top-level definitions.
self.names.add(o.name)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
for name in get_assigned_names(o.lvalues):
self.names.add(name)
def find_referenced_names(file: MypyFile) -> set[str]:
finder = ReferenceFinder()
file.accept(finder)
return finder.refs
def is_none_expr(expr: Expression) -> bool:
return isinstance(expr, NameExpr) and expr.name == "None"
class ReferenceFinder(mypy.mixedtraverser.MixedTraverserVisitor):
"""Find all name references (both local and global)."""
# TODO: Filter out local variable and class attribute references
def __init__(self) -> None:
# All referenced names (short names plus the prefixes of dotted names).
self.refs: set[str] = set()
def visit_block(self, block: Block) -> None:
if not block.is_unreachable:
super().visit_block(block)
def visit_name_expr(self, e: NameExpr) -> None:
self.refs.add(e.name)
def visit_instance(self, t: Instance) -> None:
self.add_ref(t.type.name)
super().visit_instance(t)
def visit_unbound_type(self, t: UnboundType) -> None:
if t.name:
self.add_ref(t.name)
def visit_tuple_type(self, t: TupleType) -> None:
# Ignore fallback
for item in t.items:
item.accept(self)
def visit_callable_type(self, t: CallableType) -> None:
# Ignore fallback
for arg in t.arg_types:
arg.accept(self)
t.ret_type.accept(self)
def add_ref(self, fullname: str) -> None:
self.refs.add(fullname)
while "." in fullname:
fullname = fullname.rsplit(".", 1)[0]
self.refs.add(fullname)
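# For example, add_ref("os.path.join") records "os.path.join", "os.path" and
# "os", so every prefix of a dotted reference counts as referenced.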
class ASTStubGenerator(BaseStubGenerator, mypy.traverser.TraverserVisitor):
"""Generate stub text from a mypy AST."""
def __init__(
self,
_all_: list[str] | None = None,
include_private: bool = False,
analyzed: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
) -> None:
super().__init__(_all_, include_private, export_less, include_docstrings)
self._decorators: list[str] = []
# Stack of defined variables (per scope).
self._vars: list[list[str]] = [[]]
# What was generated previously in the stub file.
self._state = EMPTY
self._current_class: ClassDef | None = None
# Was the tree semantically analysed before?
self.analyzed = analyzed
# Short names of methods defined in the body of the current class
self.method_names: set[str] = set()
self.processing_enum = False
self.processing_dataclass = False
def visit_mypy_file(self, o: MypyFile) -> None:
self.module_name = o.fullname # Current module being processed
self.path = o.path
self.set_defined_names(find_defined_names(o))
self.referenced_names = find_referenced_names(o)
super().visit_mypy_file(o)
self.check_undefined_names()
def visit_overloaded_func_def(self, o: OverloadedFuncDef) -> None:
"""@property with setters and getters, @overload chain and some others."""
overload_chain = False
for item in o.items:
if not isinstance(item, Decorator):
continue
if self.is_private_name(item.func.name, item.func.fullname):
continue
self.process_decorator(item)
if not overload_chain:
self.visit_func_def(item.func)
if item.func.is_overload:
overload_chain = True
elif item.func.is_overload:
self.visit_func_def(item.func)
else:
# skip the overload implementation and clear the decorator we just processed
self.clear_decorators()
def get_default_function_sig(self, func_def: FuncDef, ctx: FunctionContext) -> FunctionSig:
args = self._get_func_args(func_def, ctx)
retname = self._get_func_return(func_def, ctx)
return FunctionSig(func_def.name, args, retname)
def _get_func_args(self, o: FuncDef, ctx: FunctionContext) -> list[ArgSig]:
args: list[ArgSig] = []
# Ignore the pos-only status of magic methods whose argument names are elided
# by mypy at parse time.
actually_pos_only_args = o.name not in MAGIC_METHODS_POS_ARGS_ONLY
pos_only_marker_position = 0 # Where to insert "/", if any
for i, arg_ in enumerate(o.arguments):
var = arg_.variable
kind = arg_.kind
name = var.name
annotated_type = (
o.unanalyzed_type.arg_types[i]
if isinstance(o.unanalyzed_type, CallableType)
else None
)
# I think the name check is incorrect: there are libraries which
# name their 0th argument other than self/cls
is_self_arg = i == 0 and name == "self"
is_cls_arg = i == 0 and name == "cls"
typename: str | None = None
if annotated_type and not is_self_arg and not is_cls_arg:
# Luckily, an argument explicitly annotated with "Any" has
# type "UnboundType" and will not match.
if not isinstance(get_proper_type(annotated_type), AnyType):
typename = self.print_annotation(annotated_type)
if actually_pos_only_args and arg_.pos_only:
pos_only_marker_position += 1
if kind.is_named() and not any(arg.name.startswith("*") for arg in args):
args.append(ArgSig("*"))
default = "..."
if arg_.initializer:
if not typename:
typename = self.get_str_type_of_node(arg_.initializer, True, False)
potential_default, valid = self.get_str_default_of_node(arg_.initializer)
if valid and len(potential_default) <= 200:
default = potential_default
elif kind == ARG_STAR:
name = f"*{name}"
elif kind == ARG_STAR2:
name = f"**{name}"
args.append(
ArgSig(name, typename, default=bool(arg_.initializer), default_value=default)
)
if pos_only_marker_position:
args.insert(pos_only_marker_position, ArgSig("/"))
if ctx.class_info is not None and all(
arg.type is None and arg.default is False for arg in args
):
new_args = infer_method_arg_types(
ctx.name, ctx.class_info.self_var, [arg.name for arg in args]
)
if new_args is not None:
args = new_args
return args
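# Illustrative sketch of the argument handling above: for a source function
# like `def f(x, /, y, *, flag=False)` the collected ArgSigs render roughly as
# `x, /, y, *, flag: bool = False` -- the "/" marker is inserted after the
# positional-only block, a bare "*" precedes keyword-only arguments, and the
# type of `flag` is inferred from its literal default. Exact output may vary
# with mypy version.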
def _get_func_return(self, o: FuncDef, ctx: FunctionContext) -> str | None:
if o.name != "__init__" and isinstance(o.unanalyzed_type, CallableType):
if isinstance(get_proper_type(o.unanalyzed_type.ret_type), AnyType):
# Luckily, a return type explicitly annotated with "Any" has
# type "UnboundType" and will enter the else branch.
return None # implicit Any
else:
return self.print_annotation(o.unanalyzed_type.ret_type)
if o.abstract_status == IS_ABSTRACT or o.name in METHODS_WITH_RETURN_VALUE:
# Always assume abstract methods return Any unless explicitly annotated. Also
# some dunder methods should not have a None return type.
return None # implicit Any
retname = infer_method_ret_type(o.name)
if retname is not None:
return retname
if has_yield_expression(o) or has_yield_from_expression(o):
generator_name = self.add_name("collections.abc.Generator")
yield_name = "None"
send_name = "None"
return_name = "None"
if has_yield_from_expression(o):
yield_name = send_name = self.add_name("_typeshed.Incomplete")
else:
for expr, in_assignment in all_yield_expressions(o):
if expr.expr is not None and not is_none_expr(expr.expr):
yield_name = self.add_name("_typeshed.Incomplete")
if in_assignment:
send_name = self.add_name("_typeshed.Incomplete")
if has_return_statement(o):
return_name = self.add_name("_typeshed.Incomplete")
return f"{generator_name}[{yield_name}, {send_name}, {return_name}]"
if not has_return_statement(o) and o.abstract_status == NOT_ABSTRACT:
return "None"
return None
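# Illustrative example of the generator inference above: a function whose body
# contains only `yield 1` gets the return annotation
# `Generator[Incomplete, None, None]`, while one using `yield from ...` gets
# `Generator[Incomplete, Incomplete, None]` (names as introduced via add_name).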
def _get_func_docstring(self, node: FuncDef) -> str | None:
if not node.body.body:
return None
expr = node.body.body[0]
if isinstance(expr, ExpressionStmt) and isinstance(expr.expr, StrExpr):
return expr.expr.value
return None
def visit_func_def(self, o: FuncDef) -> None:
is_dataclass_generated = (
self.analyzed and self.processing_dataclass and o.info.names[o.name].plugin_generated
)
if is_dataclass_generated and o.name != "__init__":
# Skip methods generated by the @dataclass decorator (except for __init__)
return
if (
self.is_private_name(o.name, o.fullname)
or self.is_not_in_all(o.name)
or (self.is_recorded_name(o.name) and not o.is_overload)
):
self.clear_decorators()
return
if self.is_top_level() and self._state not in (EMPTY, FUNC):
self.add("\n")
if not self.is_top_level():
self_inits = find_self_initializers(o)
for init, value in self_inits:
if init in self.method_names:
# Can't have both an attribute and a method/property with the same name.
continue
init_code = self.get_init(init, value)
if init_code:
self.add(init_code)
if self._current_class is not None:
if len(o.arguments):
self_var = o.arguments[0].variable.name
else:
self_var = "self"
class_info = ClassInfo(self._current_class.name, self_var)
else:
class_info = None
ctx = FunctionContext(
module_name=self.module_name,
name=o.name,
docstring=self._get_func_docstring(o),
is_abstract=o.abstract_status != NOT_ABSTRACT,
class_info=class_info,
)
self.record_name(o.name)
default_sig = self.get_default_function_sig(o, ctx)
sigs = self.get_signatures(default_sig, self.sig_generators, ctx)
for output in self.format_func_def(
sigs, is_coroutine=o.is_coroutine, decorators=self._decorators, docstring=ctx.docstring
):
self.add(output + "\n")
self.clear_decorators()
self._state = FUNC
def visit_decorator(self, o: Decorator) -> None:
if self.is_private_name(o.func.name, o.func.fullname):
return
self.process_decorator(o)
self.visit_func_def(o.func)
def process_decorator(self, o: Decorator) -> None:
"""Process a series of decorators.
Only preserve certain special decorators such as @abstractmethod.
"""
o.func.is_overload = False
for decorator in o.original_decorators:
if not isinstance(decorator, (NameExpr, MemberExpr)):
continue
qualname = get_qualified_name(decorator)
fullname = self.get_fullname(decorator)
if fullname in (
"builtins.property",
"builtins.staticmethod",
"builtins.classmethod",
"functools.cached_property",
):
self.add_decorator(qualname, require_name=True)
elif fullname in (
"asyncio.coroutine",
"asyncio.coroutines.coroutine",
"types.coroutine",
):
o.func.is_awaitable_coroutine = True
self.add_decorator(qualname, require_name=True)
elif fullname == "abc.abstractmethod":
self.add_decorator(qualname, require_name=True)
o.func.abstract_status = IS_ABSTRACT
elif fullname in (
"abc.abstractproperty",
"abc.abstractstaticmethod",
"abc.abstractclassmethod",
):
abc_module = qualname.rpartition(".")[0]
if not abc_module:
self.import_tracker.add_import("abc")
builtin_decorator_replacement = fullname[len("abc.abstract") :]
self.add_decorator(builtin_decorator_replacement, require_name=False)
self.add_decorator(f"{abc_module or 'abc'}.abstractmethod", require_name=True)
o.func.abstract_status = IS_ABSTRACT
elif fullname in OVERLOAD_NAMES:
self.add_decorator(qualname, require_name=True)
o.func.is_overload = True
elif qualname.endswith((".setter", ".deleter")):
self.add_decorator(qualname, require_name=False)
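# Illustrative consequence of the filtering above: a method decorated with both
# @staticmethod and @functools.lru_cache keeps only @staticmethod in the stub;
# decorators outside the known set (property/staticmethod/classmethod/
# cached_property, coroutine markers, abc.abstractmethod and the abc.abstract*
# variants, overload, and .setter/.deleter) are dropped.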
def get_fullname(self, expr: Expression) -> str:
"""Return the expression's full name."""
if (
self.analyzed
and isinstance(expr, (NameExpr, MemberExpr))
and expr.fullname
and not (isinstance(expr.node, Var) and expr.node.is_suppressed_import)
):
return expr.fullname
name = get_qualified_name(expr)
return self.resolve_name(name)
def visit_class_def(self, o: ClassDef) -> None:
self._current_class = o
self.method_names = find_method_names(o.defs.body)
sep: int | None = None
if self.is_top_level() and self._state != EMPTY:
sep = len(self._output)
self.add("\n")
decorators = self.get_class_decorators(o)
for d in decorators:
self.add(f"{self._indent}@{d}\n")
self.record_name(o.name)
base_types = self.get_base_types(o)
if base_types:
for base in base_types:
self.import_tracker.require_name(base)
if self.analyzed and o.info.is_enum:
self.processing_enum = True
if isinstance(o.metaclass, (NameExpr, MemberExpr)):
meta = o.metaclass.accept(AliasPrinter(self))
base_types.append("metaclass=" + meta)
elif self.analyzed and o.info.is_abstract and not o.info.is_protocol:
base_types.append("metaclass=abc.ABCMeta")
self.import_tracker.add_import("abc")
self.import_tracker.require_name("abc")
bases = f"({', '.join(base_types)})" if base_types else ""
self.add(f"{self._indent}class {o.name}{bases}:\n")
self.indent()
if self._include_docstrings and o.docstring:
docstring = mypy.util.quote_docstring(o.docstring)
self.add(f"{self._indent}{docstring}\n")
n = len(self._output)
self._vars.append([])
super().visit_class_def(o)
self.dedent()
self._vars.pop()
self._vars[-1].append(o.name)
if len(self._output) == n:
if self._state == EMPTY_CLASS and sep is not None:
self._output[sep] = ""
if not (self._include_docstrings and o.docstring):
self._output[-1] = self._output[-1][:-1] + " ...\n"
self._state = EMPTY_CLASS
else:
self._state = CLASS
self.method_names = set()
self.processing_dataclass = False
self.processing_enum = False
self._current_class = None
def get_base_types(self, cdef: ClassDef) -> list[str]:
"""Get list of base classes for a class."""
base_types: list[str] = []
p = AliasPrinter(self)
for base in cdef.base_type_exprs + cdef.removed_base_type_exprs:
if isinstance(base, (NameExpr, MemberExpr)):
if self.get_fullname(base) != "builtins.object":
base_types.append(get_qualified_name(base))
elif isinstance(base, IndexExpr):
base_types.append(base.accept(p))
elif isinstance(base, CallExpr):
# namedtuple(typename, fields), NamedTuple(typename, fields) calls can
# be used as a base class. The first argument is a string literal that
# is usually the same as the class name.
#
# Note:
# A call-based named tuple as a base class cannot be safely converted to
# a class-based NamedTuple definition because class attributes defined
# in the body of the class inheriting from the named tuple call are not
# namedtuple fields at runtime.
if self.is_namedtuple(base):
nt_fields = self._get_namedtuple_fields(base)
assert isinstance(base.args[0], StrExpr)
typename = base.args[0].value
if nt_fields is None:
# Invalid namedtuple() call, cannot determine fields
base_types.append(self.add_name("_typeshed.Incomplete"))
continue
fields_str = ", ".join(f"({f!r}, {t})" for f, t in nt_fields)
namedtuple_name = self.add_name("typing.NamedTuple")
base_types.append(f"{namedtuple_name}({typename!r}, [{fields_str}])")
elif self.is_typed_namedtuple(base):
base_types.append(base.accept(p))
else:
# At this point, we don't know what the base class is, so we
# just use Incomplete as the base class.
base_types.append(self.add_name("_typeshed.Incomplete"))
for name, value in cdef.keywords.items():
if name == "metaclass":
continue # handled separately
base_types.append(f"{name}={value.accept(p)}")
return base_types
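# Illustrative example of the call-based named tuple handling above:
#
#     class Point(namedtuple("Point", "x y")): ...
#
# yields the base `NamedTuple('Point', [('x', Incomplete), ('y', Incomplete)])`
# in the stub, keeping the call form for the reason given in the note above.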
def get_class_decorators(self, cdef: ClassDef) -> list[str]:
decorators: list[str] = []
p = AliasPrinter(self)
for d in cdef.decorators:
if self.is_dataclass(d):
decorators.append(d.accept(p))
self.import_tracker.require_name(get_qualified_name(d))
self.processing_dataclass = True
return decorators
def is_dataclass(self, expr: Expression) -> bool:
if isinstance(expr, CallExpr):
expr = expr.callee
return self.get_fullname(expr) == "dataclasses.dataclass"
def visit_block(self, o: Block) -> None:
# Unreachable statements may be partially uninitialized and that may
# cause trouble.
if not o.is_unreachable:
super().visit_block(o)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
foundl = []
for lvalue in o.lvalues:
if isinstance(lvalue, NameExpr) and isinstance(o.rvalue, CallExpr):
if self.is_namedtuple(o.rvalue) or self.is_typed_namedtuple(o.rvalue):
self.process_namedtuple(lvalue, o.rvalue)
foundl.append(False) # state is updated in process_namedtuple
continue
if self.is_typeddict(o.rvalue):
self.process_typeddict(lvalue, o.rvalue)
foundl.append(False) # state is updated in process_typeddict
continue
if (
isinstance(lvalue, NameExpr)
and not self.is_private_name(lvalue.name)
# it is never an alias with explicit annotation
and not o.unanalyzed_type
and self.is_alias_expression(o.rvalue)
):
self.process_typealias(lvalue, o.rvalue)
continue
if isinstance(lvalue, (TupleExpr, ListExpr)):
items = lvalue.items
if isinstance(o.unanalyzed_type, TupleType): # type: ignore[misc]
annotations: Iterable[Type | None] = o.unanalyzed_type.items
else:
annotations = [None] * len(items)
else:
items = [lvalue]
annotations = [o.unanalyzed_type]
sep = False
found = False
for item, annotation in zip(items, annotations):
if isinstance(item, NameExpr):
init = self.get_init(item.name, o.rvalue, annotation)
if init:
found = True
if not sep and self.is_top_level() and self._state not in (EMPTY, VAR):
init = "\n" + init
sep = True
self.add(init)
self.record_name(item.name)
foundl.append(found)
if all(foundl):
self._state = VAR
def is_namedtuple(self, expr: CallExpr) -> bool:
return self.get_fullname(expr.callee) == "collections.namedtuple"
def is_typed_namedtuple(self, expr: CallExpr) -> bool:
return self.get_fullname(expr.callee) in TYPED_NAMEDTUPLE_NAMES
def _get_namedtuple_fields(self, call: CallExpr) -> list[tuple[str, str]] | None:
if self.is_namedtuple(call):
fields_arg = call.args[1]
if isinstance(fields_arg, StrExpr):
field_names = fields_arg.value.replace(",", " ").split()
elif isinstance(fields_arg, (ListExpr, TupleExpr)):
field_names = []
for field in fields_arg.items:
if not isinstance(field, StrExpr):
return None
field_names.append(field.value)
else:
return None # Invalid namedtuple fields type
if field_names:
incomplete = self.add_name("_typeshed.Incomplete")
return [(field_name, incomplete) for field_name in field_names]
else:
return []
elif self.is_typed_namedtuple(call):
fields_arg = call.args[1]
if not isinstance(fields_arg, (ListExpr, TupleExpr)):
return None
fields: list[tuple[str, str]] = []
p = AliasPrinter(self)
for field in fields_arg.items:
if not (isinstance(field, TupleExpr) and len(field.items) == 2):
return None
field_name, field_type = field.items
if not isinstance(field_name, StrExpr):
return None
fields.append((field_name.value, field_type.accept(p)))
return fields
else:
return None # Not a named tuple call
def process_namedtuple(self, lvalue: NameExpr, rvalue: CallExpr) -> None:
if self._state == CLASS:
self.add("\n")
if not isinstance(rvalue.args[0], StrExpr):
self.annotate_as_incomplete(lvalue)
return
fields = self._get_namedtuple_fields(rvalue)
if fields is None:
self.annotate_as_incomplete(lvalue)
return
bases = self.add_name("typing.NamedTuple")
# TODO: Add support for generic NamedTuples. Requires `Generic` as base class.
class_def = f"{self._indent}class {lvalue.name}({bases}):"
if len(fields) == 0:
self.add(f"{class_def} ...\n")
self._state = EMPTY_CLASS
else:
if self._state not in (EMPTY, CLASS):
self.add("\n")
self.add(f"{class_def}\n")
for f_name, f_type in fields:
self.add(f"{self._indent} {f_name}: {f_type}\n")
self._state = CLASS
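# Illustrative result of process_namedtuple: a module-level
#
#     Point = namedtuple("Point", ["x", "y"])
#
# becomes roughly
#
#     class Point(NamedTuple):
#         x: Incomplete
#         y: Incomplete
#
# with field types left as Incomplete, since a namedtuple() call carries no
# type information.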
def is_typeddict(self, expr: CallExpr) -> bool:
return self.get_fullname(expr.callee) in TPDICT_NAMES
def process_typeddict(self, lvalue: NameExpr, rvalue: CallExpr) -> None:
if self._state == CLASS:
self.add("\n")
if not isinstance(rvalue.args[0], StrExpr):
self.annotate_as_incomplete(lvalue)
return
items: list[tuple[str, Expression]] = []
total: Expression | None = None
if len(rvalue.args) > 1 and rvalue.arg_kinds[1] == ARG_POS:
if not isinstance(rvalue.args[1], DictExpr):
self.annotate_as_incomplete(lvalue)
return
for attr_name, attr_type in rvalue.args[1].items:
if not isinstance(attr_name, StrExpr):
self.annotate_as_incomplete(lvalue)
return
items.append((attr_name.value, attr_type))
if len(rvalue.args) > 2:
if rvalue.arg_kinds[2] != ARG_NAMED or rvalue.arg_names[2] != "total":
self.annotate_as_incomplete(lvalue)
return
total = rvalue.args[2]
else:
for arg_name, arg in zip(rvalue.arg_names[1:], rvalue.args[1:]):
if not isinstance(arg_name, str):
self.annotate_as_incomplete(lvalue)
return
if arg_name == "total":
total = arg
else:
items.append((arg_name, arg))
p = AliasPrinter(self)
if any(not key.isidentifier() or keyword.iskeyword(key) for key, _ in items):
# Keep the call syntax if there are non-identifier or reserved keyword keys.
self.add(f"{self._indent}{lvalue.name} = {rvalue.accept(p)}\n")
self._state = VAR
else:
bases = self.add_name("typing_extensions.TypedDict")
# TODO: Add support for generic TypedDicts. Requires `Generic` as base class.
if total is not None:
bases += f", total={total.accept(p)}"
class_def = f"{self._indent}class {lvalue.name}({bases}):"
if len(items) == 0:
self.add(f"{class_def} ...\n")
self._state = EMPTY_CLASS
else:
if self._state not in (EMPTY, CLASS):
self.add("\n")
self.add(f"{class_def}\n")
for key, key_type in items:
self.add(f"{self._indent} {key}: {key_type.accept(p)}\n")