Commit

fixed formatting of examples/fashion-mnist
AbhinavTuli committed Aug 21, 2020
1 parent d67187f commit 94cc2d8
Showing 4 changed files with 135 additions and 126 deletions.
84 changes: 44 additions & 40 deletions examples/fashion-mnist/train_pytorch.py
@@ -4,6 +4,7 @@
 import torch.nn.functional as F
 import torch.optim as optim
 
+
 class CNN(nn.Module):
     def __init__(self):
         super(CNN, self).__init__()
@@ -20,46 +21,48 @@ def forward(self, x):
         x = F.relu(self.fc1(x))
         x = F.dropout(x, training=self.training)
         x = self.fc2(x)
-        return F.log_softmax(x,dim=1)
-
-def train(model,train_loader,optimizer):
-    model.train()
-    for batch_idx, batch in enumerate(train_loader):
-        data=batch["data"]
-        data=torch.unsqueeze(data,1)
-        labels=batch["labels"]
-        labels = labels.type(torch.LongTensor)
-        optimizer.zero_grad()
-        output = model(data)
-        loss = F.nll_loss(output, labels)
-        loss.backward()
-        optimizer.step()
-
-def test(model,test_loader):
-    model.eval()
-    print("Evaluating on Test Set")
-    test_loss = correct = 0
-    with torch.no_grad():
-        for batch in test_loader:
-            data=batch["data"]
-            data=torch.unsqueeze(data,1)
-            labels=batch["labels"]
-            labels = labels.type(torch.LongTensor)
-            output = model(data)
-            test_loss += F.nll_loss(output, labels, reduction='sum').item()
-            pred = output.data.max(1, keepdim=True)[1]
-            correct += pred.eq(labels.data.view_as(pred)).sum()
-
-    test_loss /= len(test_loader.dataset)
-    print('Test set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
-        test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
+        return F.log_softmax(x, dim=1)
+
+
+def train(model, train_loader, optimizer):
+    model.train()
+    for batch_idx, batch in enumerate(train_loader):
+        data = batch["data"]
+        data = torch.unsqueeze(data, 1)
+        labels = batch["labels"]
+        labels = labels.type(torch.LongTensor)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = F.nll_loss(output, labels)
+        loss.backward()
+        optimizer.step()
+
+
+def test(model, test_loader):
+    model.eval()
+    print("Evaluating on Test Set")
+    test_loss = correct = 0
+    with torch.no_grad():
+        for batch in test_loader:
+            data = batch["data"]
+            data = torch.unsqueeze(data, 1)
+            labels = batch["labels"]
+            labels = labels.type(torch.LongTensor)
+            output = model(data)
+            test_loss += F.nll_loss(output, labels, reduction='sum').item()
+            pred = output.data.max(1, keepdim=True)[1]
+            correct += pred.eq(labels.data.view_as(pred)).sum()
+
+    test_loss /= len(test_loader.dataset)
+    print('Test set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
+
 
 def main():
     EPOCHS = 3
     BATCH_SIZE = 64
     LEARNING_RATE = 0.01
     MOMENTUM = 0.5
 
     torch.backends.cudnn.enabled = False
     random_seed = 2
     torch.manual_seed(random_seed)
@@ -72,19 +75,20 @@ def main():
 
     # Splitting back into the original train and test sets, instead of random split
     train_dataset = torch.utils.data.Subset(ds, range(60000))
-    test_dataset = torch.utils.data.Subset(ds, range(60000,70000))
+    test_dataset = torch.utils.data.Subset(ds, range(60000, 70000))
 
-    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE,collate_fn=ds.collate_fn)
-    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE,collate_fn=ds.collate_fn)
+    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, collate_fn=ds.collate_fn)
+    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE, collate_fn=ds.collate_fn)
 
     model = CNN()
     optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM)
 
     for epoch in range(EPOCHS):
         print("Starting Training Epoch {}".format(epoch))
-        train(model,train_loader,optimizer)
+        train(model, train_loader, optimizer)
         print("Training Epoch {} finished\n".format(epoch))
-        test(model,test_loader)
+        test(model, test_loader)
 
+
 if __name__ == "__main__":
-    main()
+    main()
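
Note: the collapsed context above (@@ -72,19 +75,20 @@) hides how ds is created inside main(). Judging from the TensorFlow scripts below, it presumably loads the Hub dataset and wraps it for PyTorch, roughly like this sketch (an inference, not lines from this commit; to_pytorch is the assumed converter):

    from hub import dataset  # assumed import, mirroring train_tf_fit.py

    ds = dataset.load("abhinavtuli/fashion-mnist")  # 70,000 Fashion-MNIST samples
    ds = ds.to_pytorch()  # assumed converter; would supply the ds.collate_fn used by the DataLoaders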
48 changes: 26 additions & 22 deletions examples/fashion-mnist/train_tf_fit.py
@@ -1,25 +1,28 @@
 from hub import dataset
 import tensorflow as tf
 
+
 def create_CNN():
-    model = tf.keras.Sequential()
-    model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=(28,28,1)))
-    model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
-    model.add(tf.keras.layers.Dropout(0.3))
-    model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
-    model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
-    model.add(tf.keras.layers.Dropout(0.3))
-    model.add(tf.keras.layers.Flatten())
-    model.add(tf.keras.layers.Dense(256, activation='relu'))
-    model.add(tf.keras.layers.Dropout(0.5))
-    model.add(tf.keras.layers.Dense(10, activation='softmax'))
-    return model
+    model = tf.keras.Sequential()
+    model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=(28, 28, 1)))
+    model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
+    model.add(tf.keras.layers.Dropout(0.3))
+    model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
+    model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
+    model.add(tf.keras.layers.Dropout(0.3))
+    model.add(tf.keras.layers.Flatten())
+    model.add(tf.keras.layers.Dense(256, activation='relu'))
+    model.add(tf.keras.layers.Dropout(0.5))
+    model.add(tf.keras.layers.Dense(10, activation='softmax'))
+    return model
+
 
 def to_model_fit(item):
-    data=item["data"]
-    data=tf.expand_dims(data, axis=2)
-    labels=item["labels"]
-    return (data,labels)
+    data = item["data"]
+    data = tf.expand_dims(data, axis=2)
+    labels = item["labels"]
+    return (data, labels)
+
 
 def main():
     BATCH_SIZE = 64
@@ -31,20 +34,21 @@ def main():
     # transform into Tensorflow dataset
     ds = ds.to_tensorflow()
 
-    #converting ds so that it can be directly used in model.fit
-    ds=ds.map(lambda x: to_model_fit(x))
+    # converting ds so that it can be directly used in model.fit
+    ds = ds.map(lambda x: to_model_fit(x))
 
     # Splitting back into the original train and test sets
-    train_dataset = ds.take(60000)
+    train_dataset = ds.take(60000)
     test_dataset = ds.skip(60000)
 
-    train_dataset=train_dataset.batch(BATCH_SIZE)
-    test_dataset=test_dataset.batch(BATCH_SIZE)
+    train_dataset = train_dataset.batch(BATCH_SIZE)
+    test_dataset = test_dataset.batch(BATCH_SIZE)
 
     model = create_CNN()
     # model.summary()
-    model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
+    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
     model.fit(train_dataset, epochs=EPOCHS, validation_data=test_dataset, validation_steps=1)
 
+
 if __name__ == "__main__":
     main()
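
A side note on the mapped pipeline above (an observation, not part of the commit): tf.data.Dataset.map accepts a callable directly, so the lambda wrapper is redundant and the mapping line could equivalently be written as:

    ds = ds.map(to_model_fit)  # behaves identically to ds.map(lambda x: to_model_fit(x))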
128 changes: 65 additions & 63 deletions examples/fashion-mnist/train_tf_gradient_tape.py
@@ -3,78 +3,80 @@
 from tensorflow.keras.losses import SparseCategoricalCrossentropy
 from tensorflow.keras.optimizers import Adam
 
+
 def create_CNN():
-    model = tf.keras.Sequential()
-    model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=(28,28,1)))
-    model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
-    model.add(tf.keras.layers.Dropout(0.3))
-    model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
-    model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
-    model.add(tf.keras.layers.Dropout(0.3))
-    model.add(tf.keras.layers.Flatten())
-    model.add(tf.keras.layers.Dense(256, activation='relu'))
-    model.add(tf.keras.layers.Dropout(0.5))
-    model.add(tf.keras.layers.Dense(10, activation='softmax'))
-    return model
-
-def train(model,train_dataset,optimizer,loss_fn,train_acc_metric):
-    for batch in train_dataset:
-        with tf.GradientTape() as tape:
-            pred = model(tf.expand_dims(batch["data"], axis=3))
-            loss=loss_fn(batch["labels"], pred)
-
-        # calculate gradients and update the model weights
-        grads = tape.gradient(loss, model.trainable_variables)
-        optimizer.apply_gradients(zip(grads, model.trainable_variables))
-        train_acc_metric.update_state(batch["labels"], pred)
-
-    train_acc = train_acc_metric.result()
-    print("Training acc: %.4f" % (float(train_acc),))
-    train_acc_metric.reset_states()
-
-def test(model,test_dataset,test_acc_metric):
-    print("Evaluating on Test Set")
-    for batch in test_dataset:
-        pred = model(tf.expand_dims(batch["data"], axis=3), training=False)
-        test_acc_metric.update_state(batch["labels"],pred)
-
-    test_acc = test_acc_metric.result()
-    print("Test acc: %.4f" % (float(test_acc),))
-    test_acc_metric.reset_states()
+    model = tf.keras.Sequential()
+    model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=(28, 28, 1)))
+    model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
+    model.add(tf.keras.layers.Dropout(0.3))
+    model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
+    model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
+    model.add(tf.keras.layers.Dropout(0.3))
+    model.add(tf.keras.layers.Flatten())
+    model.add(tf.keras.layers.Dense(256, activation='relu'))
+    model.add(tf.keras.layers.Dropout(0.5))
+    model.add(tf.keras.layers.Dense(10, activation='softmax'))
+    return model
+
+
+def train(model, train_dataset, optimizer, loss_fn, train_acc_metric):
+    for batch in train_dataset:
+        with tf.GradientTape() as tape:
+            pred = model(tf.expand_dims(batch["data"], axis=3))
+            loss = loss_fn(batch["labels"], pred)
+
+        # calculate gradients and update the model weights
+        grads = tape.gradient(loss, model.trainable_variables)
+        optimizer.apply_gradients(zip(grads, model.trainable_variables))
+        train_acc_metric.update_state(batch["labels"], pred)
+
+    train_acc = train_acc_metric.result()
+    print("Training acc: %.4f" % (float(train_acc),))
+    train_acc_metric.reset_states()
+
+
+def test(model, test_dataset, test_acc_metric):
+    print("Evaluating on Test Set")
+    for batch in test_dataset:
+        pred = model(tf.expand_dims(batch["data"], axis=3), training=False)
+        test_acc_metric.update_state(batch["labels"], pred)
+
+    test_acc = test_acc_metric.result()
+    print("Test acc: %.4f" % (float(test_acc),))
+    test_acc_metric.reset_states()
+
 
 def main():
-    BATCH_SIZE = 64
-    EPOCHS = 3
+    BATCH_SIZE = 64
+    EPOCHS = 3
 
-    optimizer = Adam()
-    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
-    test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
-    loss_fn = SparseCategoricalCrossentropy()
+    optimizer = Adam()
+    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
+    test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
+    loss_fn = SparseCategoricalCrossentropy()
 
-    # Load data
-    ds = dataset.load("abhinavtuli/fashion-mnist")
+    # Load data
+    ds = dataset.load("abhinavtuli/fashion-mnist")
 
-    # transform into Tensorflow dataset
-    ds = ds.to_tensorflow()
+    # transform into Tensorflow dataset
+    ds = ds.to_tensorflow()
 
-    # Splitting back into the original train and test sets
-    train_dataset = ds.take(60000)
-    test_dataset = ds.skip(60000)
+    # Splitting back into the original train and test sets
+    train_dataset = ds.take(60000)
+    test_dataset = ds.skip(60000)
 
-    train_dataset=train_dataset.batch(BATCH_SIZE)
-    test_dataset=test_dataset.batch(BATCH_SIZE)
+    train_dataset = train_dataset.batch(BATCH_SIZE)
+    test_dataset = test_dataset.batch(BATCH_SIZE)
 
-    model=create_CNN()
-    # model.summary()
-
-    for epoch in range(EPOCHS):
-        print("\nStarting Training Epoch {}".format(epoch))
-        train(model,train_dataset,optimizer,loss_fn,train_acc_metric)
-        print("Training Epoch {} finished\n".format(epoch))
-        test(model,test_dataset,test_acc_metric)
-
-if __name__ == "__main__":
-    main()
+    model = create_CNN()
+    # model.summary()
+
+    for epoch in range(EPOCHS):
+        print("\nStarting Training Epoch {}".format(epoch))
+        train(model, train_dataset, optimizer, loss_fn, train_acc_metric)
+        print("Training Epoch {} finished\n".format(epoch))
+        test(model, test_dataset, test_acc_metric)
+
+
+if __name__ == "__main__":
+    main()
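
The GradientTape loop above runs eagerly, which is correct but often slower than a compiled step; wrapping the per-batch work in tf.function is a common optional refinement. A minimal sketch under that assumption (not part of this commit; train_step is a hypothetical helper):

    @tf.function  # traces the step once, then runs it as a graph
    def train_step(model, batch, optimizer, loss_fn, train_acc_metric):
        with tf.GradientTape() as tape:
            # training=True keeps the Dropout layers active; the committed loop omits it
            pred = model(tf.expand_dims(batch["data"], axis=3), training=True)
            loss = loss_fn(batch["labels"], pred)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_acc_metric.update_state(batch["labels"], pred)
        return loss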
1 change: 0 additions & 1 deletion examples/fashion-mnist/upload.py
@@ -61,7 +61,6 @@ def main():
         images, labels = load_fashion_mnist(f, path=args.dataset_path)
         dicts += [{"images": images, "labels": labels}]
     images = np.concatenate([d["images"] for d in dicts])
-
     labels = np.concatenate([np.array(d["labels"], dtype="int8") for d in dicts])
     print(images.shape, labels.shape)
