Use print() function in Python 2 and 3
cclauss committed Sep 30, 2018
1 parent 97d673d commit 48a174a
Showing 46 changed files with 248 additions and 199 deletions.
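Every file in this commit gets the same two-step treatment: add "from __future__ import print_function" near the top of the module, then rewrite each print statement as a print() function call. A minimal sketch of the pattern (the file name and sample values are illustrative, not taken from the commit):

#!/usr/bin/env python
# demo.py -- illustrative only, not a file in this commit.
# The future import must precede all other statements
# (only a docstring or comments may come before it).
from __future__ import print_function

# With the import in place, Python 2 parses print as a function,
# matching Python 3 semantics, so the same source runs under both.
print('expected gradient: \t%f' % 0.5)  # single-argument form, as used throughout this commit
print('a', 'b', sep=', ')               # keyword arguments (sep, end, file) also become available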
11 changes: 6 additions & 5 deletions src/py2.x/dl/bp.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 -*-
 
+from __future__ import print_function
 import random
 from numpy import *
 
@@ -273,7 +274,7 @@ def dump(self):
         '''
         # Iterate over all of the layer's nodes and print each node's info
         for node in self.nodes:
-            print node
+            print(node)
 
 
 # Connection class: records a connection's weight and the upstream/downstream nodes it joins
@@ -396,7 +397,7 @@ def dump(self):
         None
         '''
         for conn in self.connections:
-            print conn
+            print(conn)
 
 
 # Network object: provides the corresponding API
@@ -743,7 +744,7 @@ def gradient_check(network, sample_feature, sample_label):
     expected_gradient = (error2 - error1) / (2 * epsilon)
 
     # Print the comparison
-    print 'expected gradient: \t%f\nactual gradient: \t%f' % (expected_gradient, actual_gradient)
+    print('expected gradient: \t%f\nactual gradient: \t%f' % (expected_gradient, actual_gradient))
 
 
 def train_data_set():
@@ -804,7 +805,7 @@ def test(network, data):
     # Run prediction on the test data
     predict_data = network.predict(norm_data)
     # Print the result
-    print '\ttestdata(%u)\tpredict(%u)' % (data, normalizer.denorm(predict_data))
+    print('\ttestdata(%u)\tpredict(%u)' % (data, normalizer.denorm(predict_data)))
 
 
 def correct_ratio(network):
@@ -821,7 +822,7 @@ def correct_ratio(network):
     for i in range(256):
         if normalizer.denorm(network.predict(normalizer.norm(i))) == i:
             correct += 1.0
-    print 'correct_ratio: %.2f%%' % (correct / 256 * 100)
+    print('correct_ratio: %.2f%%' % (correct / 256 * 100))
 
 
 def gradient_check_test():
15 changes: 8 additions & 7 deletions src/py2.x/dl/cnn.py
@@ -2,6 +2,7 @@
 # -*- coding: UTF-8 -*-
 
 
+from __future__ import print_function
 import numpy as np
 from activators import ReluActivator, IdentityActivator
 
@@ -387,14 +388,14 @@ def init_test():
 def test():
     a, b, cl = init_test()
     cl.forward(a)
-    print cl.output_array
+    print(cl.output_array)
 
 def test_bp():
     a, b, cl = init_test()
     cl.backward(a, b, IdentityActivator())
     cl.update()
-    print cl.filters[0]
-    print cl.filters[1]
+    print(cl.filters[0])
+    print(cl.filters[1])
 
 
 def gradient_check():
@@ -427,8 +428,8 @@ def gradient_check():
                 err2 = error_function(cl.output_array)
                 expect_grad = (err1 - err2) / (2 * epsilon)
                 cl.filters[0].weights[d,i,j] += epsilon
-                print 'weights(%d,%d,%d): expected - actural %f - %f' % (
-                    d, i, j, expect_grad, cl.filters[0].weights_grad[d,i,j])
+                print('weights(%d,%d,%d): expected - actural %f - %f' % (
+                    d, i, j, expect_grad, cl.filters[0].weights_grad[d,i,j]))
 
 
 def init_pool_test():
@@ -456,10 +457,10 @@ def init_pool_test():
 def test_pool():
     a, b, mpl = init_pool_test()
     mpl.forward(a)
-    print 'input array:\n%s\noutput array:\n%s' % (a, mpl.output_array)
+    print('input array:\n%s\noutput array:\n%s' % (a, mpl.output_array))
 
 
 def test_pool_bp():
     a, b, mpl = init_pool_test()
     mpl.backward(a, b)
-    print 'input array:\n%s\nsensitivity array:\n%s\ndelta array:\n%s' % (a, b, mpl.delta_array)
+    print('input array:\n%s\nsensitivity array:\n%s\ndelta array:\n%s' % (a, b, mpl.delta_array))
13 changes: 7 additions & 6 deletions src/py2.x/dl/fc.py
@@ -2,6 +2,7 @@
 # -*- coding: UTF-8 -*-
 
 
+from __future__ import print_function
 import random
 import numpy as np
 from activators import SigmoidActivator, IdentityActivator
@@ -57,7 +58,7 @@ def update(self, learning_rate):
         self.b += learning_rate * self.b_grad
 
     def dump(self):
-        print 'W: %s\nb:%s' % (self.W, self.b)
+        print('W: %s\nb:%s' % (self.W, self.b))
 
 
 # Neural network class
@@ -149,8 +150,8 @@ def gradient_check(self, sample_feature, sample_label):
                     err2 = self.loss(sample_label, output)
                     expect_grad = (err1 - err2) / (2 * epsilon)
                     fc.W[i,j] += epsilon
-                    print 'weights(%d,%d): expected - actural %.4e - %.4e' % (
-                        i, j, expect_grad, fc.W_grad[i,j])
+                    print('weights(%d,%d): expected - actural %.4e - %.4e' % (
+                        i, j, expect_grad, fc.W_grad[i,j]))
 
 
 from bp import train_data_set
@@ -197,7 +198,7 @@ def correct_ratio(network):
     for i in range(256):
         if normalizer.denorm(network.predict(normalizer.norm(i))) == i:
             correct += 1.0
-    print 'correct_ratio: %.2f%%' % (correct / 256 * 100)
+    print('correct_ratio: %.2f%%' % (correct / 256 * 100))
 
 
 def test():
@@ -208,10 +209,10 @@ def test():
     epoch = 10
     for i in range(epoch):
         net.train(labels, data_set, rate, mini_batch)
-        print 'after epoch %d loss: %f' % (
+        print('after epoch %d loss: %f' % (
             (i + 1),
             net.loss(labels[-1], net.predict(data_set[-1]))
-        )
+        ))
         rate /= 2
         correct_ratio(net)
 
11 changes: 6 additions & 5 deletions src/py2.x/dl/linear_unit.py
@@ -2,6 +2,7 @@
 # -*- coding: UTF-8 -*-
 
 # Import the Perceptron class
+from __future__ import print_function
 from perceptron import Perceptron
 
 # Define the activation function f
@@ -112,10 +113,10 @@ def plot(linear_unit):
     # First, train our linear unit
     linear_unit = train_linear_unit()
     # Print the learned weights and bias
-    print linear_unit
+    print(linear_unit)
     # Test
-    print 'Work 3.4 years, monthly salary = %.2f' % linear_unit.predict([3.4])
-    print 'Work 15 years, monthly salary = %.2f' % linear_unit.predict([15])
-    print 'Work 1.5 years, monthly salary = %.2f' % linear_unit.predict([1.5])
-    print 'Work 6.3 years, monthly salary = %.2f' % linear_unit.predict([6.3])
+    print('Work 3.4 years, monthly salary = %.2f' % linear_unit.predict([3.4]))
+    print('Work 15 years, monthly salary = %.2f' % linear_unit.predict([15]))
+    print('Work 1.5 years, monthly salary = %.2f' % linear_unit.predict([1.5]))
+    print('Work 6.3 years, monthly salary = %.2f' % linear_unit.predict([6.3]))
     plot(linear_unit)
5 changes: 3 additions & 2 deletions src/py2.x/dl/lstm.py
@@ -2,6 +2,7 @@
 # -*- coding: UTF-8 -*-
 
 
+from __future__ import print_function
 import matplotlib.pyplot as plt
 import numpy as np
 from cnn import element_wise_op
@@ -319,8 +320,8 @@ def gradient_check():
             err2 = error_function(lstm.h_list[-1])
             expect_grad = (err1 - err2) / (2 * epsilon)
             lstm.Wfh[i,j] += epsilon
-            print 'weights(%d,%d): expected - actural %.4e - %.4e' % (
-                i, j, expect_grad, lstm.Wfh_grad[i,j])
+            print('weights(%d,%d): expected - actural %.4e - %.4e' % (
+                i, j, expect_grad, lstm.Wfh_grad[i,j]))
     return lstm
 
 
9 changes: 5 additions & 4 deletions src/py2.x/dl/mnist.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 -*-
 
+from __future__ import print_function
 import struct
 from fc import *
 from datetime import datetime
@@ -124,7 +125,7 @@ def show(sample):
             else:
                 str += ' '
         str += '\n'
-    print str
+    print(str)
 
 
 def get_result(vec):
@@ -162,11 +163,11 @@ def train_and_evaluate():
     while True:
         epoch += 1
         network.train(train_labels, train_data_set, 0.01, 1)
-        print '%s epoch %d finished, loss %f' % (now(), epoch,
-            network.loss(train_labels[-1], network.predict(train_data_set[-1])))
+        print('%s epoch %d finished, loss %f' % (now(), epoch,
+            network.loss(train_labels[-1], network.predict(train_data_set[-1]))))
         if epoch % 2 == 0:
             error_ratio = evaluate(network, test_data_set, test_labels)
-            print '%s after epoch %d, error ratio is %f' % (now(), epoch, error_ratio)
+            print('%s after epoch %d, error ratio is %f' % (now(), epoch, error_ratio))
             if error_ratio > last_error_ratio:
                 break
             else:
11 changes: 6 additions & 5 deletions src/py2.x/dl/perceptron.py
@@ -3,6 +3,7 @@

 # Neuron / perceptron
 
+from __future__ import print_function
 class Perceptron():
     '''
     Desc:
@@ -178,9 +179,9 @@ def train_and_perceptron():
     # Train the AND perceptron
     and_perceptron = train_and_perceptron()
     # Print the learned weights
-    print and_perceptron
+    print(and_perceptron)
     # Test
-    print '1 and 1 = %d' % and_perceptron.predict([1, 1])
-    print '0 and 0 = %d' % and_perceptron.predict([0, 0])
-    print '1 and 0 = %d' % and_perceptron.predict([1, 0])
-    print '0 and 1 = %d' % and_perceptron.predict([0, 1])
+    print('1 and 1 = %d' % and_perceptron.predict([1, 1]))
+    print('0 and 0 = %d' % and_perceptron.predict([0, 0]))
+    print('1 and 0 = %d' % and_perceptron.predict([1, 0]))
+    print('0 and 1 = %d' % and_perceptron.predict([0, 1]))
13 changes: 7 additions & 6 deletions src/py2.x/dl/recursive.py
@@ -2,6 +2,7 @@
 # -*- coding: UTF-8 -*-
 
 
+from __future__ import print_function
 import numpy as np
 from activators import IdentityActivator
 
@@ -114,11 +115,11 @@ def calc_gradient(self, parent):
         return W_grad, b_grad
 
     def dump(self, **kwArgs):
-        print 'root.data: %s' % self.root.data
-        print 'root.children_data: %s' % self.root.children_data
+        print('root.data: %s' % self.root.data)
+        print('root.children_data: %s' % self.root.children_data)
         if kwArgs.has_key('dump_grad'):
-            print 'W_grad: %s' % self.W_grad
-            print 'b_grad: %s' % self.b_grad
+            print('W_grad: %s' % self.W_grad)
+            print('b_grad: %s' % self.b_grad)
 
 
 def data_set():
@@ -167,8 +168,8 @@ def gradient_check():
             err2 = error_function(rnn.root.data)
             expect_grad = (err1 - err2) / (2 * epsilon)
             rnn.W[i,j] += epsilon
-            print 'weights(%d,%d): expected - actural %.4e - %.4e' % (
-                i, j, expect_grad, rnn.W_grad[i,j])
+            print('weights(%d,%d): expected - actural %.4e - %.4e' % (
+                i, j, expect_grad, rnn.W_grad[i,j]))
     return rnn
 
 
5 changes: 3 additions & 2 deletions src/py2.x/dl/rnn.py
@@ -2,6 +2,7 @@
 # -*- coding: UTF-8 -*-
 
 
+from __future__ import print_function
 import numpy as np
 from cnn import element_wise_op
 from activators import ReluActivator, IdentityActivator
@@ -143,8 +144,8 @@ def gradient_check():
             err2 = error_function(rl.state_list[-1])
             expect_grad = (err1 - err2) / (2 * epsilon)
             rl.W[i,j] += epsilon
-            print 'weights(%d,%d): expected - actural %f - %f' % (
-                i, j, expect_grad, rl.gradient[i,j])
+            print('weights(%d,%d): expected - actural %f - %f' % (
+                i, j, expect_grad, rl.gradient[i,j]))
 
 
 def test():
13 changes: 7 additions & 6 deletions src/py2.x/ml/1.MLFoundation/NumPy.py
@@ -7,6 +7,7 @@
 Author: Peter Harrington/1988/片刻
 GitHub: https://github.com/apachecn/AiLearning
 '''
+from __future__ import print_function
 
 from numpy import random, mat, eye
 
@@ -43,15 +44,15 @@
 TraRandMat = randMat.T
 ArrRandMat = randMat.A
 # Print the results
-print 'randArray=(%s) \n' % type(randArray), randArray
-print 'randMat=(%s) \n' % type(randMat), randMat
-print 'invRandMat=(%s) \n' % type(invRandMat), invRandMat
-print 'TraRandMat=(%s) \n' % type(TraRandMat), TraRandMat
-print 'ArrRandMat=(%s) \n' % type(ArrRandMat), ArrRandMat
+print('randArray=(%s) \n' % type(randArray), randArray)
+print('randMat=(%s) \n' % type(randMat), randMat)
+print('invRandMat=(%s) \n' % type(invRandMat), invRandMat)
+print('TraRandMat=(%s) \n' % type(TraRandMat), TraRandMat)
+print('ArrRandMat=(%s) \n' % type(ArrRandMat), ArrRandMat)
 # Multiply the matrix by its inverse (in theory this yields the 4*4 identity matrix: ones on the diagonal, zeros elsewhere)
 myEye = randMat*invRandMat
 # Error relative to the identity
-print myEye - eye(4)
+print(myEye - eye(4))
 
 '''
 If the code above runs without problems, numpy is installed correctly