Skip to content

Commit

Permalink
initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
wikke committed Aug 29, 2017
1 parent a8a1d32 commit 2c4be38
Show file tree
Hide file tree
Showing 5 changed files with 937 additions and 10 deletions.
76 changes: 76 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
# Time Series Predictions

> Play with time
## 1. Shampoo Sales Prediction

> `ShampooSales.ipynb`
Sales data looks like this; we need to predict future values from the history.

<img src='./assets/sales.png'>

A wonderful tutorial to convert time series prediction to supervised problem: [Time Series Forecasting as Supervised Learning](https://machinelearningmastery.com/time-series-forecasting-supervised-learning/)

### Result

best fit before overfitting:

<img src='./assets/train.png'>

### Stateful LSTM

Core code

```
model = Sequential()
model.add(LSTM(4, batch_input_shape=(BATCH_SIZE, X.shape[1], X.shape[2]), stateful=True))
model.add(Dropout(0.5))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adadelta')
# way 1: reset state manually between epochs
for i in range(EPOCHS):
    model.fit(X, y, epochs=1, shuffle=False, batch_size=BATCH_SIZE)
    model.reset_states()

# way 2: reset state via a callback
class StatusResetCallback(Callback):
    def on_batch_begin(self, batch, logs={}):
        self.model.reset_states()

model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE,
          shuffle=False, callbacks=[StatusResetCallback()])
```

## 2. Stateful LSTM in Keras

> `StatefulLSTM.ipynb`
Learning from [Stateful LSTM in Keras](http://philipperemy.github.io/keras-stateful-lstm/) by Philippe Remy, which is a wonderful and simple tutorial. The composed dataset is simple and clean:

```
X y
1 0 0 ... 0 1
0 0 0 ... 0 0
0 0 0 ... 0 0
1 0 0 ... 0 1
1 0 0 ... 0 1
...
```

Obviously, if the first element of the X sequence is 1, then y = 1; otherwise y = 0. We will see whether that initial 1 is carried along through the sequence to predict the result.

### Stateless LSTM Can't Converge

```
model = Sequential()
model.add(LSTM(LSTM_UNITS, input_shape=X_train.shape[1:], return_sequences=False, stateful=False))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
```

### Stateful LSTM

It works. Talk is cheap — see the code.
22 changes: 12 additions & 10 deletions TimeSeries.ipynb → ShampooSales.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import numpy as np\n",
Expand Down Expand Up @@ -243,8 +245,8 @@
}
],
"source": [
"BATCH_SIEE = 4\n",
"train_len = len(train_scaled) // BATCH_SIEE * BATCH_SIEE\n",
"BATCH_SIZE = 4\n",
"train_len = len(train_scaled) // BATCH_SIZE * BATCH_SIZE\n",
"train_scaled = train_scaled[:train_len]\n",
"train_scaled.shape"
]
Expand Down Expand Up @@ -281,10 +283,10 @@
"source": [
"def get_model():\n",
" model = Sequential()\n",
" model.add(LSTM(4, batch_input_shape=(BATCH_SIEE, X.shape[1], X.shape[2]), stateful=True))\n",
" model.add(LSTM(4, batch_input_shape=(BATCH_SIZE, X.shape[1], X.shape[2]), stateful=True))\n",
"\n",
"# model.add(LSTM(16, batch_input_shape=(BATCH_SIEE, X.shape[1], X.shape[2]), stateful=True, return_sequences=True))\n",
"# model.add(LSTM(16, batch_input_shape=(BATCH_SIEE, X.shape[1], X.shape[2]), stateful=True, return_sequences=False))\n",
"# model.add(LSTM(16, batch_input_shape=(BATCH_SIZE, X.shape[1], X.shape[2]), stateful=True, return_sequences=True))\n",
"# model.add(LSTM(16, batch_input_shape=(BATCH_SIZE, X.shape[1], X.shape[2]), stateful=True, return_sequences=False))\n",
"\n",
" model.add(Dropout(0.5))\n",
" model.add(Dense(1, activation='linear'))\n",
Expand All @@ -303,13 +305,13 @@
"outputs": [],
"source": [
"def make_prediction(model, X):\n",
" gap = BATCH_SIEE - X.shape[0] % BATCH_SIEE\n",
" gap = BATCH_SIZE - X.shape[0] % BATCH_SIZE\n",
" if gap != 0:\n",
" X = np.concatenate([X, X[:gap]], axis=0)\n",
" \n",
" X_reshape = X.reshape((X.shape[0], 1, X.shape[1]))\n",
" \n",
" y_pred = model.predict(X_reshape, batch_size=BATCH_SIEE)\n",
" y_pred = model.predict(X_reshape, batch_size=BATCH_SIZE)\n",
" \n",
" data_scaled = np.concatenate([X, y_pred], axis=1)\n",
" data = scaler.inverse_transform(data_scaled)\n",
Expand Down Expand Up @@ -606,7 +608,7 @@
],
"source": [
"for i in range(EPOCHS):\n",
" model.fit(X, y, epochs=1, shuffle=False, verbose=0, batch_size=BATCH_SIEE)\n",
" model.fit(X, y, epochs=1, shuffle=False, verbose=0, batch_size=BATCH_SIZE)\n",
" model.reset_states()\n",
" \n",
" if (i+1) % 500 == 0:\n",
Expand Down Expand Up @@ -671,7 +673,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
"version": "3.6.1"
}
},
"nbformat": 4,
Expand Down
Loading

0 comments on commit 2c4be38

Please sign in to comment.