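"""Auto-generates a ready-to-run Jupyter notebook (a "pipeline") using KGFarm.

Given an entity and a target, the generator chains KGFarm operations
(feature identification, enrichment, cleaning, transformation, feature
selection) with model training and evaluation, emitting one notebook
cell per step. This summary is inferred from the code below.
"""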
import argparse
import os
import pickle
import sys
import time

import nbformat as nbf
import pandas as pd

sys.path.insert(0, '../')
from operations.api import KGFarm
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import f1_score
from helpers.helper import connect_to_stardog, time_taken

os.chdir('../')


class PipelineGenerator:
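    """Builds notebook cells step by step and writes them out as a .ipynb file."""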
    def __init__(self, pipeline_name: str, port: int = 5820, database: str = 'kgfarm_test',
                 show_connection_status: bool = False):
        self.config = connect_to_stardog(port, database, show_connection_status)
        self.notebook = nbf.v4.new_notebook()
        self.kgfarm = KGFarm(mode='Automatic', show_connection_status=False)
        self.name = pipeline_name + '.ipynb'
        self.cells = list()

    def __add(self, info: str, info_type: str = 'code'):
        if info_type == 'markdown':
            self.cells.append(nbf.v4.new_markdown_cell(info))
        elif info_type == 'code':
            self.cells.append(nbf.v4.new_code_cell(info))

    def __print_dataframe(self):
        code = """entity_df = pd.read_csv(feature_info.iloc[0].File_source)\nprint(feature_info.iloc[0].Physical_table)\nentity_df"""
        self.__add(code)

    def add_documentation(self):
        heading = """### This pipeline was generated by <font color = 'green'>KGFarm's Pipeline Generator</font> <font color = 'red'>(in {})</font>\n<b>Feel free to edit :)</b>"""
        self.__add(heading, info_type='markdown')

    def get_notebook_name(self):
        return self.name

    def write_to_notebook(self, time_taken_to_generate_notebook: str):
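        """Substitute the generation time into the heading cell, then write all cells to the notebook file."""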
        documentation = self.cells[0]
        source = documentation.get('source')
        documentation['source'] = source.format(time_taken_to_generate_notebook[:-3] + ' seconds')
        self.cells[0] = documentation
        # write to notebook
        self.notebook['cells'] = self.cells
        nbf.write(self.notebook, 'operations/out/pipelines/' + self.name)
        print('pipeline saved at {}/operations/out/pipelines/{}'.format(os.getcwd(), self.name))

    def instantiate_kgfarm(self):
        code = """import os\nos.chdir('../../../')\nfrom operations.api import KGFarm\nkgfarm = KGFarm()"""
        self.__add(code)

    def import_libraries(self):
        code = """import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import f1_score"""
        self.__add(code)

    def identify_features(self, entity_name: str, target_name: str):
        print(f'• identifying features for {entity_name}', end=' ')
        feature_info = self.kgfarm.identify_features(entity=entity_name, target=target_name)
        if len(feature_info):
            print('done.')
            code = f"""feature_info = kgfarm.identify_features(entity='{entity_name}', target='{target_name}')\nfeature_info"""
            self.__add(code)
            self.__print_dataframe()
        return feature_info

    def search_enrichment_options(self, feature_info: pd.DataFrame):
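        """Load the entity table and query KGFarm for point-in-time correct join (enrichment) candidates."""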
        print('• attempting to enrich data (via point-in-time correct join)', end=' ')
        entity_df = pd.read_csv(feature_info.iloc[0].File_source)
        enrichment_info = self.kgfarm.search_enrichment_options(entity_df=entity_df)
        if len(enrichment_info):
            code = """enrichment_info = kgfarm.search_enrichment_options(entity_df)\nenrichment_info"""
            self.__add(code)
        return enrichment_info, entity_df

    def enrich(self, enrich_info: tuple):
        entity_df = self.kgfarm.enrich(enrichment_info=enrich_info[0].iloc[0], entity_df=enrich_info[1])
        if len(entity_df):
            print('done.')
            code = """entity_df = kgfarm.enrich(enrichment_info.iloc[0], entity_df)\nentity_df"""
            self.__add(code)
        return entity_df

    def search_data_cleaning_operations(self, entity_df: pd.DataFrame):
        print('• cleaning messy data (if any)', end=' ')
        cleaning_info = self.kgfarm.recommend_cleaning_operations(entity_df=entity_df, visualize_missing_data=False)
        if len(cleaning_info):
            code = """cleaning_info = kgfarm.recommend_cleaning_operations(entity_df)\ncleaning_info"""
            self.__add(code)
        return entity_df, cleaning_info

    def clean(self, cleaning_info: tuple):
        entity_df = self.kgfarm.clean(entity_df=cleaning_info[0], cleaning_info=cleaning_info[1].iloc[0])
        if len(entity_df):
            print('done.')
            code = """entity_df = kgfarm.clean(entity_df, cleaning_info.iloc[0])\nentity_df"""
            self.__add(code)
        return entity_df

    def search_transformations(self, entity_df: pd.DataFrame):
        print('• applying transformations', end=' ')
        transformation_info = self.kgfarm.recommend_data_transformations(entity_df=entity_df)
        if len(transformation_info):
            print('found {} transformations'.format(len(transformation_info)))
            code = """transformation_info = kgfarm.recommend_data_transformations(entity_df)\ntransformation_info"""
            self.__add(code)
        return transformation_info, entity_df

    def apply_data_transformations(self, transformation_info: tuple):
        # TODO: manage feature-leakage for scaling and normalization
        entity_df = transformation_info[1]
        if len(transformation_info[0]):
            print('done.')
            code = ''
            for transformation in range(len(transformation_info[0])):
                entity_df, _ = self.kgfarm.apply_data_transformation(transformation_info[0].iloc[transformation],
                                                                     entity_df=entity_df)
                code = code + """entity_df, _ = kgfarm.apply_data_transformation(transformation_info.iloc[{}], entity_df)\n""".format(transformation)
            code = code + """entity_df"""
            self.__add(code)
        return entity_df

    def select_features(self, entity_df: pd.DataFrame, dependent_variable: str):
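        """Delegate feature selection to KGFarm; returns the feature matrix X and target vector y."""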
        print('• selecting features', end=' ')
        X, y = self.kgfarm.select_features(entity_df=entity_df, dependent_variable=dependent_variable,
                                           plot_correlation=False, plot_anova_test=False)
        if len(X) and len(y):
            print('done.')
            code = """X, y = kgfarm.select_features(entity_df, dependent_variable='{}',\n""".format(dependent_variable) + \
                   """plot_correlation=True, plot_anova_test=True, show_f_value=True)\nX"""
            self.__add(code)
        return X, y

    def split_data(self, data: tuple):
        print('• building train and test sets for model training done.')
        X_train, X_test, y_train, y_test = train_test_split(data[0], data[1], test_size=0.20, random_state=0)
        code = """X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)"""
        self.__add(code)
        return X_train, X_test, y_train, y_test

    def train_model(self, machine_learning_problem: str, data: tuple):
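        """Fit three scikit-learn classifiers; returns the random forest together with the held-out test split."""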
        X_train, X_test, y_train, y_test = data
        if machine_learning_problem == 'classification':
            print('• training classifiers', end=' ')
            # instantiate the models
            random_forest_classifier = RandomForestClassifier()
            gradient_boosting_classifier = GradientBoostingClassifier()
            naive_bayes_classifier = GaussianNB()
            # fit the models on data
            random_forest_classifier.fit(X_train, y_train)
            gradient_boosting_classifier.fit(X_train, y_train)
            naive_bayes_classifier.fit(X_train, y_train)
            # add info to notebook
            models = """random_forest_classifier = RandomForestClassifier()\ngradient_boosting_classifier = GradientBoostingClassifier()\nnaive_bayes_classifier = GaussianNB()\n"""
            fit = """random_forest_classifier.fit(X_train, y_train)\ngradient_boosting_classifier.fit(X_train, y_train)\nnaive_bayes_classifier.fit(X_train, y_train)"""
            self.__add(models + fit)
            print('done.')
            return random_forest_classifier, X_test, y_test
        else:
            error = '{} not supported yet'.format(machine_learning_problem)
            raise ValueError(error)

    def evaluate_model(self, evaluation_info: tuple):
        model, X_test, y_true = evaluation_info
        print('• evaluating model', end=' ')
        y_pred = model.predict(X_test)
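        # note: f1_score's default average='binary' assumes a binary classification target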
        f1 = round(f1_score(y_true, y_pred), 3)
        evaluate = """y_pred = random_forest_classifier.predict(X_test)\nf1_random_forest_classifier = round(f1_score(y_test, y_pred), 3)\ny_pred = gradient_boosting_classifier.predict(X_test)\nf1_gradient_boosting_classifier = round(f1_score(y_test, y_pred), 3)\ny_pred = naive_bayes_classifier.predict(X_test)\nf1_naive_bayes_classifier = round(f1_score(y_test, y_pred), 3)\n"""
        plot = """from helpers.helper import plot_scores\nscores = {'Random forest classifier': f1_random_forest_classifier,
          'Gradient boosting classifier': f1_gradient_boosting_classifier,
          'Naive bayes classifier': f1_naive_bayes_classifier}\nfor classifier, f1 in scores.items():\n\tprint(f"{'{} (f1-score):'.format(classifier):<42}{f1:>1}")"""
        print('done.\n', 'F1 Score = {}'.format(f1))
        self.__add(evaluate + plot)
        with open('operations/out/models/f1-{}.pkl'.format(f1), 'wb') as f:
            pickle.dump(model, f)
        print('model saved at {}'.format(os.getcwd() + '/operations/out/models/f1-{}.pkl'.format(f1)))


def run(pipeline_name, ml_task: str, entity_name: str, target_name: str):
    if pipeline_name is None:
        pipeline_name = 'my_pipeline'
    pipeline_generator = PipelineGenerator(pipeline_name=pipeline_name)
    print('Generating {}...'.format(pipeline_generator.get_notebook_name()))
    start = time.time()
    pipeline_generator.add_documentation()
    if not os.path.exists('operations/out/pipelines'):
        os.makedirs('operations/out/pipelines')
    if not os.path.exists('operations/out/models'):
        os.makedirs('operations/out/models')
    print('\n• setting up pipeline template done.')
    # run the pipeline steps in order, feeding each step's output to the next
    pipeline_generator.instantiate_kgfarm()
    pipeline_generator.import_libraries()
    feature_info = pipeline_generator.identify_features(entity_name=entity_name, target_name=target_name)
    enrichment_options = pipeline_generator.search_enrichment_options(feature_info=feature_info)
    entity_df = pipeline_generator.enrich(enrich_info=enrichment_options)
    cleaning_options = pipeline_generator.search_data_cleaning_operations(entity_df=entity_df)
    entity_df = pipeline_generator.clean(cleaning_info=cleaning_options)
    transformation_info = pipeline_generator.search_transformations(entity_df=entity_df)
    entity_df = pipeline_generator.apply_data_transformations(transformation_info=transformation_info)
    features_and_target = pipeline_generator.select_features(entity_df=entity_df, dependent_variable=target_name)
    splits = pipeline_generator.split_data(data=features_and_target)
    evaluation_info = pipeline_generator.train_model(machine_learning_problem=ml_task, data=splits)
    pipeline_generator.evaluate_model(evaluation_info=evaluation_info)
    pipeline_generator.write_to_notebook(time_taken(start, time.time()))
    print('Done in ', time_taken(start, time.time()))


if __name__ == "__main__":
    # parse user input
    ap = argparse.ArgumentParser()
    ap.add_argument('-pipeline', '--pipeline', required=False,
                    help='name of your new pipeline')
    ap.add_argument('-task', '--task', required=True,
                    help='classification, regression, etc.')
    ap.add_argument('-entity', '--entity', required=True,
                    help='entity associated with the task')
    ap.add_argument('-target', '--target', required=True,
                    help='target i.e. the dependent variable')
    args = vars(ap.parse_args())
    pipeline = args['pipeline']
    task = args['task']
    entity = args['entity']
    target = args['target']
    run(pipeline, task, entity, target)
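
# Example invocation (entity and target names below are illustrative, not from the repo):
#   python pipeline_generator.py --pipeline my_pipeline --task classification --entity customers --target churn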