Skip to content

Commit

Permalink
Lint all backend files.
Browse files Browse the repository at this point in the history
  • Loading branch information
seanlip committed Jan 2, 2016
1 parent a1ef009 commit db43cf6
Show file tree
Hide file tree
Showing 176 changed files with 4,803 additions and 4,891 deletions.
11 changes: 9 additions & 2 deletions .jscsrc
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,17 @@
{
"preset": "google",
"excludeFiles": [
"third_party/**",
"core/templates/dev/head/expressions/parser.js",
"core/templates/prod/**",
"core/tests/protractor.conf.js"
"core/tests/protractor.conf.js",
"extensions/interactions/LogicProof/static/js/generatedDefaultData.js",
"extensions/interactions/LogicProof/static/js/generatedParser.js",
"integrations/**",
"integrations_dev/**",
"scripts/expand_template.js",
"scripts/wikicities.js",
"static/scripts/**",
"third_party/**"
],
"fileExtensions": [".js"],
"extract": ["*.html"],
Expand Down
19 changes: 16 additions & 3 deletions .pylintrc
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ method-rgx=^([_a-z][a-z0-9_]*|__[a-z0-9]+__)$
const-rgx=^(([A-Z_][A-Z0-9_]*)|(__.*__)|([a-z_]+_models)|([a-z_]+_services))$

# Good variable names which should always be accepted, separated by a comma
good-names=e,_,f,p,fs,id,sc,setUp,tearDown,longMessage,maxDiff
good-names=e,_,d,f,i,l,p,w,fn,fs,id,pc,sc,zf,setUp,tearDown,longMessage,maxDiff

# Regex for dummy variables (to prevent 'unused argument' errors)
dummy-variables-rgx=_|unused_*
Expand All @@ -27,6 +27,9 @@ dummy-variables-rgx=_|unused_*
# Minimum number of public methods for a class (see R0903).
min-public-methods=0

# List of builtin function names that should not be used, separated by a comma
bad-functions=apply,input

[FORMAT]

indent-string=' '
Expand All @@ -37,19 +40,29 @@ ignore-imports=yes

[MESSAGES CONTROL]

# TODO(sll): Re-enable the following checks:
# TODO(sll): Consider re-enabling the following checks:
# abstract-method
# arguments-differ
# broad-except
# duplicate-code
# fixme
# missing-docstring
# no-member
# no-self-use
# redefined-variable-type
# too-many-arguments
# too-many-boolean-expressions
# too-many-branches
# too-many-instance-attributes
# too-many-lines
# too-many-locals
# too-many-public-methods
# too-many-statements
# and fix those issues.

disable=locally-disabled,locally-enabled,logging-not-lazy,arguments-differ,broad-except,fixme,missing-docstring,no-member,no-self-use,redefined-variable-type,too-many-branches,too-many-instance-attributes,too-many-lines,too-many-locals,too-many-statements
disable=locally-disabled,locally-enabled,logging-not-lazy,abstract-method,arguments-differ,broad-except,duplicate-code,fixme,missing-docstring,no-member,no-self-use,redefined-variable-type,too-many-arguments,too-many-boolean-expressions,too-many-branches,too-many-instance-attributes,too-many-lines,too-many-locals,too-many-public-methods,too-many-statements

[REPORTS]

# Hide statistics, external dependencies and overall rating.
reports=no
4 changes: 2 additions & 2 deletions appengine_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Configuration for App Engine."""
# pylint: disable-all

__author__ = 'sll@google.com (Sean Lip)'
"""Configuration for App Engine."""

import logging
import os
Expand Down
3 changes: 1 addition & 2 deletions core/controllers/admin.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import user_services
from core.platform import models
import feconf
import utils
Expand All @@ -46,7 +45,7 @@ def test_super_admin(self, **kwargs):
self.redirect(
current_user_services.create_login_url(self.request.uri))
return
if not user_services.is_super_admin(self.user_id, self.request):
if not current_user_services.is_current_user_super_admin():
raise self.UnauthorizedUserException(
'%s is not a super admin of this application', self.user_id)
return handler(self, **kwargs)
Expand Down
6 changes: 3 additions & 3 deletions core/controllers/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -252,7 +252,7 @@ def __init__(self, request, response): # pylint: disable=super-init-not-called
# Initializes the return dict for the handlers.
self.values = {}

self.user = current_user_services.get_current_user(self.request)
self.user = current_user_services.get_current_user()
self.user_id = current_user_services.get_user_id(
self.user) if self.user else None
self.username = None
Expand Down Expand Up @@ -281,8 +281,8 @@ def __init__(self, request, response): # pylint: disable=super-init-not-called

self.is_moderator = rights_manager.Actor(self.user_id).is_moderator()
self.is_admin = rights_manager.Actor(self.user_id).is_admin()
self.is_super_admin = user_services.is_super_admin(
self.user_id, self.request)
self.is_super_admin = (
current_user_services.is_current_user_super_admin())

self.values['is_moderator'] = self.is_moderator
self.values['is_admin'] = self.is_admin
Expand Down
2 changes: 1 addition & 1 deletion core/controllers/base_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def test_dev_indicator_appears_in_dev_and_not_in_production(self):
def test_that_no_get_results_in_500_error(self):
"""Test that no GET request results in a 500 error."""

for route in main.urls:
for route in main.URLS:
# This was needed for the Django tests to pass (at the time we had
# a Django branch of the codebase).
if isinstance(route, tuple):
Expand Down
2 changes: 1 addition & 1 deletion core/controllers/editor.py
Original file line number Diff line number Diff line change
Expand Up @@ -506,7 +506,7 @@ def put(self, exploration_id):
raise self.InvalidInputException(
'Moderator actions should include an email to the '
'recipient.')
email_manager.require_moderator_email_prereqs_are_satisfied(action)
email_manager.require_moderator_email_prereqs_are_satisfied()

# Perform the moderator action.
if action == 'unpublish_exploration':
Expand Down
2 changes: 0 additions & 2 deletions core/counters.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,6 @@

"""Services for performance counters."""

__author__ = 'Sean Lip'


class PerfCounter(object):
"""Generic in-process numeric counter; not aggregated across instances."""
Expand Down
30 changes: 22 additions & 8 deletions core/domain/classifier_services.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,19 @@ def __init__(self):
self._alpha = self._DEFAULT_ALPHA
self._beta = self._DEFAULT_BETA

# These should be initialized in load_examples() or from_dict().
self._b_dl = None
self._c_dl = None
self._c_l = None
self._c_lw = None
self._l_dp = None
self._w_dp = None
self._label_to_id = None
self._word_to_id = None
self._num_docs = None
self._num_labels = None
self._num_words = None

self._training_iterations = self._DEFAULT_TRAINING_ITERATIONS
self._prediction_iterations = self._DEFAULT_PREDICTION_ITERATIONS

Expand Down Expand Up @@ -262,11 +275,10 @@ def _get_label_probabilities(self, d):
"""Returns a list of label probabilities for a given doc, indexed by
label id.
"""
unnormalized_label_probabilities = (
unnormalized_label_probs = (
self._c_dl[d] + (self._b_dl[d] * self._alpha))
label_probabilities = (
unnormalized_label_probabilities /
unnormalized_label_probabilities.sum())
unnormalized_label_probs / unnormalized_label_probs.sum())
return label_probabilities

def _get_prediction_report_for_doc(self, d):
Expand Down Expand Up @@ -339,8 +351,8 @@ def _iterate_gibbs_sampling(self, iterations, doc_ids):
"""Runs Gibbs sampling for "iterations" number of times on the provided
docs.
"""
for i in xrange(iterations):
statez = self._run_gibbs_sampling(doc_ids)
for _ in xrange(iterations):
self._run_gibbs_sampling(doc_ids)

def _add_examples(self, examples, iterations):
"""Adds examples to the internal state of the classifier, assigns
Expand All @@ -357,7 +369,9 @@ def _add_examples(self, examples, iterations):
last_num_words = self._num_words

# Increment _num_labels to account for any new labels.
[map(self._get_label_id, labels) for labels in labels_list]
for labels in labels_list:
for label in labels:
self._get_label_id(label)
self._num_docs += len(docs)

self._b_dl = numpy.concatenate(
Expand Down Expand Up @@ -412,8 +426,8 @@ def add_examples_for_predicting(self, prediction_examples):
"""
all_labels = self._label_to_id.keys()
return self._add_examples(
zip(prediction_examples, [copy.deepcopy(all_labels) for _ in
prediction_examples]),
zip(prediction_examples, [
copy.deepcopy(all_labels) for _ in prediction_examples]),
self._prediction_iterations)

def load_examples(self, examples):
Expand Down
23 changes: 13 additions & 10 deletions core/domain/classifier_services_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
from core.domain import classifier_services
from core.tests import test_utils

# pylint: disable=protected-access


class StringClassifierUnitTests(test_utils.GenericTestBase):

Expand All @@ -40,7 +42,7 @@ def setUp(self):
self.string_classifier = classifier_services.StringClassifier()
self.string_classifier.load_examples(self._EXAMPLES_TRAIN)

def _validate_instance(self, string_classifier):
def _validate_instance(self):
self.assertIn('_alpha', dir(self.string_classifier))
self.assertIn('_beta', dir(self.string_classifier))
self.assertIn('_prediction_threshold', dir(self.string_classifier))
Expand Down Expand Up @@ -93,37 +95,37 @@ def test_valid_state(self):
self.assertEquals(self.string_classifier._num_labels, 3)
self.assertEquals(self.string_classifier._num_docs, 2)
self.assertEquals(self.string_classifier._num_words, 7)
self._validate_instance(self.string_classifier)
self._validate_instance()

def test_add_train_examples(self):
self.string_classifier.add_examples_for_training(
self._NEW_EXAMPLES_TRAIN)
self.assertEquals(self.string_classifier._num_labels, 3)
self.assertEquals(self.string_classifier._num_docs, 3)
self.assertEquals(self.string_classifier._num_words, 10)
self._validate_instance(self.string_classifier)
self._validate_instance()

def test_add_test_examples(self):
self.string_classifier.add_examples_for_predicting(self._EXAMPLES_TEST)
self.assertEquals(self.string_classifier._num_labels, 3)
self.assertEquals(self.string_classifier._num_docs, 5)
self.assertEquals(self.string_classifier._num_words, 34)
self._validate_instance(self.string_classifier)
self._validate_instance()

def test_empty_load(self):
self.string_classifier.load_examples([])
# The default label is still present.
self.assertEquals(self.string_classifier._num_labels, 1)
self.assertEquals(self.string_classifier._num_docs, 0)
self.assertEquals(self.string_classifier._num_words, 0)
self._validate_instance(self.string_classifier)
self._validate_instance()

def test_empty_add(self):
self.string_classifier.add_examples_for_training([])
self.assertEquals(self.string_classifier._num_labels, 3)
self.assertEquals(self.string_classifier._num_docs, 2)
self.assertEquals(self.string_classifier._num_words, 7)
self._validate_instance(self.string_classifier)
self._validate_instance()

def test_model_to_and_from_dict(self):
self.assertEquals(
Expand Down Expand Up @@ -185,18 +187,19 @@ def test_reload_valid_state(self):
self.string_classifier._num_docs,
len(self._NEW_EXAMPLES_TRAIN))
self.assertEquals(self.string_classifier._num_words, 4)
self._validate_instance(self.string_classifier)
self._validate_instance()

def test_prediction_report(self):
def _mock_get_label_probabilities(d):
self.assertEquals(d, -1)
return [0.5, 0.3, 0.2]

def _mock_get_label_id(label):
def _mock_get_label_id(unused_label):
return 0

def _mock_get_label_name(l):
def _mock_get_label_name(unused_label):
return 'fake_label'

self.string_classifier._prediction_threshold = 0
self.string_classifier._get_label_probabilities = (
_mock_get_label_probabilities)
Expand Down Expand Up @@ -230,4 +233,4 @@ def test_predict_label_for_doc(self):
predicted_label = self.string_classifier.predict_label_for_doc(
doc_ids[2])
self.assertEquals(predicted_label, '_default')
self._validate_instance(self.string_classifier)
self._validate_instance()
51 changes: 25 additions & 26 deletions core/domain/collection_domain.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,6 @@
should therefore be independent of the specific storage models used.
"""

__author__ = 'Ben Henning'

import copy

import feconf
Expand Down Expand Up @@ -157,14 +155,14 @@ class CollectionNode(object):
completed.
"""

"""Constructs a new CollectionNode object.
Args:
exploration_id: A valid ID of an exploration referenced by this node.
prerequisite_skills: A list of skills (strings).
acquired_skills: A list of skills (strings).
"""
def __init__(self, exploration_id, prerequisite_skills, acquired_skills):
"""Constructs a new CollectionNode object.
Args:
- exploration_id: A valid ID of an exploration referenced by this node.
- prerequisite_skills: A list of skills (strings).
- acquired_skills: A list of skills (strings).
"""
self.exploration_id = exploration_id
self.prerequisite_skills = prerequisite_skills
self.acquired_skills = acquired_skills
Expand Down Expand Up @@ -252,23 +250,24 @@ def create_default_node(cls, exploration_id):
class Collection(object):
"""Domain object for an Oppia collection."""

"""Constructs a new collection given all the information necessary to
represent a collection.
Note: The schema_version represents the version of any underlying
dictionary or list structures stored within the collection. In particular,
the schema for CollectionNodes is represented by this version. If the
schema for CollectionNode changes, then a migration function will need to
be added to this class to convert from the current schema version to the
new one. This function should be called in both from_yaml in this class and
collection_services._migrate_collection_to_latest_schema.
feconf.CURRENT_COLLECTION_SCHEMA_VERSION should be incremented and the new
value should be saved in the collection after the migration process,
ensuring it represents the latest schema version.
"""
def __init__(self, collection_id, title, category, objective,
schema_version, nodes, version, created_on=None,
last_updated=None):
"""Constructs a new collection given all the information necessary to
represent a collection.
Note: The schema_version represents the version of any underlying
dictionary or list structures stored within the collection. In
particular, the schema for CollectionNodes is represented by this
version. If the schema for CollectionNode changes, then a migration
function will need to be added to this class to convert from the
current schema version to the new one. This function should be called
in both from_yaml in this class and
collection_services._migrate_collection_to_latest_schema.
feconf.CURRENT_COLLECTION_SCHEMA_VERSION should be incremented and the
new value should be saved in the collection after the migration
process, ensuring it represents the latest schema version.
"""
self.id = collection_id
self.title = title
self.category = category
Expand Down Expand Up @@ -411,9 +410,9 @@ def update_objective(self, objective):
self.objective = objective

def _find_node(self, exploration_id):
for i in range(len(self.nodes)):
if self.nodes[i].exploration_id == exploration_id:
return i
for ind, node in enumerate(self.nodes):
if node.exploration_id == exploration_id:
return ind
return None

def get_node(self, exploration_id):
Expand Down
Loading

0 comments on commit db43cf6

Please sign in to comment.