Adds a test for Vector output feature
msaisumanth committed Jul 29, 2019
1 parent c8ecab0 commit d460062
Showing 3 changed files with 35 additions and 17 deletions.
2 changes: 2 additions & 0 deletions ludwig/features/base_feature.py
@@ -307,6 +307,8 @@ def concat_dependencies_and_build_output(
         final_hidden[self.name] = (feature_hidden, feature_hidden_size)

         # ================ Outputs ================
+        kwargs['is_training'] = is_training
+        kwargs['dropout_rate'] = dropout_rate
         train_mean_loss, eval_loss, output_tensors = self.build_output(
             feature_hidden,
             feature_hidden_size,
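Note: the two added lines above thread the is_training and dropout_rate graph flags through kwargs to each output feature's build_output. A minimal TF 1.x sketch of a receiver that consumes them (hypothetical names, not Ludwig's actual decoder code):

import tensorflow as tf  # TF 1.x API, as used by Ludwig at the time

def build_output(feature_hidden, feature_hidden_size, **kwargs):
    # Pull the flags that base_feature now injects into kwargs.
    is_training = kwargs.get('is_training', False)
    dropout_rate = kwargs.get('dropout_rate', 0.0)
    # Gate dropout on the training flag so it is a no-op at eval time.
    hidden = tf.layers.dropout(
        feature_hidden, rate=dropout_rate, training=is_training
    )
    return hidden, feature_hidden_size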
39 changes: 23 additions & 16 deletions ludwig/features/vector_feature.py
Expand Up @@ -176,6 +176,8 @@ def __init__(self, feature):

_ = self.overwrite_defaults(feature)

self.decoder = 'fc_stack'
feature['fc_size'] = self.vector_size
self.decoder_obj = self.get_vector_decoder(feature)

def get_vector_decoder(self, decoder_parameters):
@@ -194,6 +196,7 @@ def _get_output_placeholder(self):
     def _get_measures(self, targets, predictions):

         with tf.variable_scope('measures_{}'.format(self.name)):
+            import pdb; pdb.set_trace()
             error_val = get_error(
                 targets,
                 predictions,
@@ -208,18 +211,17 @@ def _get_measures(self, targets, predictions):
                 get_squared_error(targets, predictions, self.name), axis=1
             )

-            r2_val = tf.reduce_sum(
-                get_r2(targets, predictions, self.name), axis=1
-            )
+            # TODO - not sure if this is correct
+            r2_val = tf.reduce_sum(get_r2(targets, predictions, self.name))

         return error_val, squared_error_val, absolute_error_val, r2_val

     def vector_loss(self, error_val, squared_error_val):
         with tf.variable_scope('loss_{}'.format(self.name)):
             if self.loss['type'] == MEAN_SQUARED_ERROR:
-                train_loss = tf.reduce_sum(squared_error_val, axis=1)
+                train_loss = tf.reduce_sum(squared_error_val)
             elif self.loss['type'] == MEAN_ABSOLUTE_ERROR:
-                train_loss = tf.reduce_sum(error_val, axis=1)
+                train_loss = tf.reduce_sum(error_val)
             else:
                 train_mean_loss = None
                 train_loss = None
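Note on the reductions above: dropping axis=1 collapses the per-example sums into a single scalar over the entire batch, which may be what the TODO is flagging for r2_val. A standalone NumPy illustration of the shape difference (not Ludwig code):

import numpy as np

per_component_error = np.ones((4, 8))         # batch of 4 examples, vector size 8
print(per_component_error.sum(axis=1).shape)  # (4,)  one value per example
print(per_component_error.sum().shape)        # ()    one scalar for the whole batch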
@@ -246,8 +248,9 @@ def build_output(
             self.decoder_obj,
             hidden,
             hidden_size,
+            kwargs['is_training'],
+            kwargs['dropout_rate'],
             regularizer=regularizer,
-            kwarg=kwargs
         )
         return train_mean_loss, eval_loss, output_tensors

@@ -257,8 +260,9 @@ def build_vector_output(
             decoder,
             hidden,
             hidden_size,
+            is_training,
+            dropout_rate,
             regularizer=None,
-            **kwargs
     ):
         feature_name = self.name
         output_tensors = {}
@@ -268,11 +272,12 @@ def build_vector_output(

         # ================ Predictions ================
         predictions, predictions_size = self.vector_predictions(
-            targets,
             decoder,
             hidden,
             hidden_size,
-            regularizer=regularizer
+            is_training,
+            dropout_rate,
+            regularizer=regularizer,
         )

         output_tensors[PREDICTIONS + '_' + feature_name] = predictions
@@ -317,21 +322,22 @@ def build_vector_output(

     def vector_predictions(
             self,
-            targets,
             decoder,
             hidden,
             hidden_size,
+            is_training,
+            dropout_rate,
             regularizer=None,
-            is_timeseries=False
     ):
         with tf.variable_scope('predictions_{}'.format(self.name)):
             output, output_size = decoder(
-                dict(self.__dict__),
-                targets,
+                # dict(self.__dict__),
+                # targets,
                 hidden,
                 hidden_size,
                 regularizer,
-                is_timeseries=is_timeseries
+                dropout_rate,
+                is_training
             )

         return output, output_size
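After this hunk, the decoder object is called positionally as decoder(hidden, hidden_size, regularizer, dropout_rate, is_training), with the old targets and dict(self.__dict__) arguments commented out. A stub with that call order, for illustration only (the real 'fc_stack' decoder signature is assumed here, not verified):

import tensorflow as tf

class StubVectorDecoder:
    # Hypothetical stand-in for the fc_stack decoder object.
    def __init__(self, vector_size):
        self.vector_size = vector_size

    def __call__(self, hidden, hidden_size, regularizer, dropout_rate, is_training):
        # Same positional order as the call in vector_predictions above.
        hidden = tf.layers.dropout(hidden, rate=dropout_rate, training=is_training)
        output = tf.layers.dense(
            hidden, self.vector_size, kernel_regularizer=regularizer
        )
        return output, self.vector_size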
@@ -424,8 +430,9 @@ def populate_defaults(output_feature):

         set_default_value(output_feature, LOSS, {})
         set_default_value(output_feature[LOSS], 'type', MEAN_SQUARED_ERROR)
-        set_default_value(output_feature, 'reduce_input', SUM)
-        set_default_value(output_feature, 'reduce_dependencies', SUM)
+        set_default_value(output_feature[LOSS], 'weight', 1)
+        set_default_value(output_feature, 'reduce_input', None)
+        set_default_value(output_feature, 'reduce_dependencies', None)
         set_default_value(output_feature, 'decoder', 'fc_stack')
         set_default_value(output_feature, 'dependencies', [])

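With the defaults above, a bare vector output feature entry fills in to roughly the following (assuming the MEAN_SQUARED_ERROR constant resolves to the string 'mean_squared_error'; the feature name is made up):

defaulted_output_feature = {
    'name': 'vector_out',  # hypothetical name
    'type': 'vector',
    'loss': {'type': 'mean_squared_error', 'weight': 1},
    'reduce_input': None,
    'reduce_dependencies': None,
    'decoder': 'fc_stack',
    'dependencies': [],
}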
11 changes: 10 additions & 1 deletion tests/integration_tests/test_experiment.py
@@ -549,7 +549,7 @@ def test_experiment_h3(csv_filename):
     run_experiment(input_features, output_features, data_csv=rel_path)


-def test_experiment_vector_feature(csv_filename):
+def test_experiment_vector_feature_1(csv_filename):
     input_features = [vector_feature()]
     output_features = [binary_feature()]
     # Generate test data
@@ -558,6 +558,15 @@ def test_experiment_vector_feature(csv_filename):
     run_experiment(input_features, output_features, data_csv=rel_path)


+def test_experiment_vector_feature_2(csv_filename):
+    input_features = [vector_feature()]
+    output_features = [vector_feature()]
+    # Generate test data
+    rel_path = generate_data(input_features, output_features, csv_filename)
+
+    run_experiment(input_features, output_features, data_csv=rel_path)
+
+
 if __name__ == '__main__':
     """
     To run tests individually, run:
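The new test_experiment_vector_feature_2 pairs a vector input feature with a vector output feature. One way to run just that test from the repository root (standard pytest node-id selection; this is not taken from the truncated docstring above):

import pytest

# Select the single new test by its pytest node id.
pytest.main([
    'tests/integration_tests/test_experiment.py::test_experiment_vector_feature_2',
])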
