Skip to content

Commit

Permalink
[MNT] Updating pre-commit hooks and corresponding changes (#7109)
Browse files Browse the repository at this point in the history
Maintenance only changes.

1. pre-commit autoupdate
2. pre-commit run --all-files

`ruff` detected a bunch of problems in different notebooks and solved
most of them automatically. The rest were fixed manually.
  • Loading branch information
yarnabrina authored Sep 14, 2024
1 parent 73bc154 commit 67acec5
Show file tree
Hide file tree
Showing 14 changed files with 93 additions and 109 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ repos:
- id: trailing-whitespace

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.5.7
rev: v0.6.4
hooks:
- id: ruff-format
- id: ruff
Expand Down
4 changes: 3 additions & 1 deletion examples/01_forecasting.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -5648,7 +5648,9 @@
"fh_ins = -np.arange(len(y_train)) # in-sample forecasting horizon\n",
"y_pred = forecaster.fit(y_train).predict(fh=fh_ins)\n",
"\n",
"plot_series(y_train, y_pred, yt, labels=[\"y_train\", \"fitted linear trend\", \"residuals\"]);"
"plot_series(\n",
" y_train, y_pred, yt, labels=[\"y_train\", \"fitted linear trend\", \"residuals\"]\n",
");"
]
},
{
Expand Down
3 changes: 1 addition & 2 deletions examples/01a_forecasting_sklearn.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -191,8 +191,7 @@
"\n",
"\n",
"def split_into_train_test(data, in_num, fh):\n",
" \"\"\"\n",
" Splits the series into train and test sets.\n",
" \"\"\"Split the series into train and test sets.\n",
"\n",
" Each step takes multiple points as inputs\n",
" :param data: an individual TS\n",
Expand Down
10 changes: 5 additions & 5 deletions examples/01c_forecasting_hierarchical_global.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -4527,9 +4527,10 @@
"metadata": {},
"outputs": [],
"source": [
"from sktime.utils._testing.hierarchical import _make_hierarchical\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from sktime.utils._testing.hierarchical import _make_hierarchical\n",
"\n",
"data = _make_hierarchical(\n",
" hierarchy_levels=(100, 1), max_timepoints=10, min_timepoints=10, n_columns=1\n",
")\n",
Expand Down Expand Up @@ -5278,9 +5279,10 @@
"metadata": {},
"outputs": [],
"source": [
"from sktime.utils._testing.hierarchical import _make_hierarchical\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from sktime.utils._testing.hierarchical import _make_hierarchical\n",
"\n",
"data = _make_hierarchical(\n",
" hierarchy_levels=(100, 1), max_timepoints=10, min_timepoints=10, n_columns=2\n",
")\n",
Expand All @@ -5290,9 +5292,7 @@
"X_train, X_test, y_train, y_test = train_test_split(\n",
" x, y, test_size=0.1, train_size=0.9, shuffle=False\n",
")\n",
"y_test = y_test.groupby(level=0).apply(\n",
" lambda x: x.droplevel(0).iloc[:-3]\n",
")"
"y_test = y_test.groupby(level=0).apply(lambda x: x.droplevel(0).iloc[:-3])"
]
},
{
Expand Down
3 changes: 1 addition & 2 deletions examples/05_graphical_pipelines.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,6 @@
}
],
"source": [
"\n",
"general_pipeline = Pipeline()\n",
"differencer = Differencer()\n",
"\n",
Expand Down Expand Up @@ -392,7 +391,7 @@
],
"source": [
"pipe = Pipeline()\n",
"pipe.set_config(warnings=\"off\")\n"
"pipe.set_config(warnings=\"off\")"
]
},
{
Expand Down
10 changes: 8 additions & 2 deletions examples/06_distances_kernels_alignment.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -946,6 +946,7 @@
"source": [
"# example 1: flat Gaussian RBF kernel between time series\n",
"from sklearn.gaussian_process.kernels import RBF\n",
"\n",
"from sktime.dists_kernels import FlatDist\n",
"\n",
"flat_gaussian_tskernel = FlatDist(RBF(length_scale=10))\n",
Expand Down Expand Up @@ -1329,7 +1330,11 @@
"source": [
"from sktime.utils.plotting import plot_series\n",
"\n",
"plot_series(X1_al.reset_index(drop=True), X2_al.reset_index(drop=True), labels=[\"leaf_1\", \"leaf_2\"])"
"plot_series(\n",
" X1_al.reset_index(drop=True),\n",
" X2_al.reset_index(drop=True),\n",
" labels=[\"leaf_1\", \"leaf_2\"],\n",
")"
]
},
{
Expand Down Expand Up @@ -1363,7 +1368,8 @@
}
],
"source": [
"# the AlignerDTW class (based on dtw-python) doesn't just align, it also produces a distance\n",
"# the AlignerDTW class (based on dtw-python) doesn't just align\n",
"# it also produces a distance\n",
"aligner.get_tags()"
]
},
Expand Down
25 changes: 12 additions & 13 deletions examples/forecasting/window_splitters.ipynb

Large diffs are not rendered by default.

70 changes: 32 additions & 38 deletions examples/transformation/fracdiff/example_exercise.ipynb

Large diffs are not rendered by default.

11 changes: 4 additions & 7 deletions examples/transformation/fracdiff/example_howto.ipynb

Large diffs are not rendered by default.

36 changes: 14 additions & 22 deletions examples/transformation/fracdiff/example_prado.ipynb

Large diffs are not rendered by default.

6 changes: 2 additions & 4 deletions examples/transformation/interpolation.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -123,15 +123,13 @@
}
],
"source": [
"# randomly cut the data series in-place\n",
"\n",
"\n",
"def random_cut(df):\n",
" \"\"\"Randomly cut the data series in-place.\"\"\"\n",
" for row_i in range(df.shape[0]):\n",
" for dim_i in range(df.shape[1]):\n",
" ts = df.iloc[row_i][f\"dim_{dim_i}\"]\n",
" df.iloc[row_i][f\"dim_{dim_i}\"] = pd.Series(\n",
" ts.tolist()[: random.randint(len(ts) - 5, len(ts) - 3)]\n",
" ts.tolist()[: random.randint(len(ts) - 5, len(ts) - 3)] # noqa: S311\n",
" ) # here is a problem\n",
"\n",
"\n",
Expand Down
4 changes: 2 additions & 2 deletions examples/transformation/minirocket.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -51,9 +51,9 @@
"from sklearn.pipeline import make_pipeline\n",
"from sklearn.preprocessing import StandardScaler\n",
"\n",
"from sktime.datasets import load_arrow_head # univariate dataset\n",
"from sktime.datasets import load_basic_motions # multivariate dataset\n",
"from sktime.datasets import (\n",
" load_arrow_head, # univariate dataset\n",
" load_basic_motions, # multivariate dataset\n",
" load_japanese_vowels, # multivariate dataset with unequal length\n",
")\n",
"from sktime.transformations.panel.rocket import (\n",
Expand Down
6 changes: 4 additions & 2 deletions examples/transformation/rocket.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,10 @@
"from sklearn.linear_model import RidgeClassifierCV\n",
"from sklearn.pipeline import make_pipeline\n",
"\n",
"from sktime.datasets import load_arrow_head # univariate dataset\n",
"from sktime.datasets import load_basic_motions # multivariate dataset\n",
"from sktime.datasets import (\n",
" load_arrow_head, # univariate dataset\n",
" load_basic_motions, # multivariate dataset\n",
")\n",
"from sktime.transformations.panel.rocket import Rocket"
]
},
Expand Down
12 changes: 4 additions & 8 deletions examples/transformation/signature_method.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -221,9 +221,9 @@
")\n",
"\n",
"# The simply transform the stream data\n",
"print(\"Raw data shape is: {}\".format(train_x.shape))\n",
"print(f\"Raw data shape is: {train_x.shape}\")\n",
"train_signature_x = signature_transform.fit_transform(train_x)\n",
"print(\"Signature shape is: {}\".format(train_signature_x.shape))"
"print(f\"Signature shape is: {train_signature_x.shape}\")"
]
},
{
Expand Down Expand Up @@ -261,7 +261,7 @@
"# Evaluate\n",
"test_signature_x = signature_transform.transform(test_x)\n",
"test_pred = model.predict(test_signature_x)\n",
"print(\"Accuracy: {:.3f}%\".format(accuracy_score(test_y, test_pred)))"
"print(f\"Accuracy: {accuracy_score(test_y, test_pred):.3f}%\")"
]
},
{
Expand Down Expand Up @@ -341,11 +341,7 @@
"test_preds = best_classifier.predict(test_x)\n",
"train_score = accuracy_score(train_y, train_preds)\n",
"test_score = accuracy_score(test_y, test_preds)\n",
"print(\n",
" \"Train acc: {:.3f}% | Test acc: {:.3f}%\".format(\n",
" train_score * 100, test_score * 100\n",
" )\n",
")"
"print(f\"Train acc: {train_score * 100:.3f}% | Test acc: {test_score * 100:.3f}%\")"
]
},
{
Expand Down

0 comments on commit 67acec5

Please sign in to comment.