import tensorflow as tf
import numpy as np
# XOR training data: all but the last column are inputs, the last column is the label
xy = np.loadtxt('07train.txt', unpack=True)
x_data = np.transpose(xy[0:-1])
y_data = np.reshape(xy[-1], (4, 1))
print(x_data)
print(y_data)
X = tf.placeholder(tf.float32, name='x-input')
Y = tf.placeholder(tf.float32, name='y-input')
w1 = tf.Variable(tf.random_uniform([2, 5], -1.0, 1.0), name='weight1')
w2 = tf.Variable(tf.random_uniform([5, 10], -1.0, 1.0), name='weight2')
w3 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight3')
w4 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight4')
w5 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight5')
w6 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight6')
w7 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight7')
w8 = tf.Variable(tf.random_uniform([10, 1], -1.0, 1.0), name='weight8')
b1 = tf.Variable(tf.zeros([5]), name="Bias1")
b3 = tf.Variable(tf.zeros([10]), name="Bias3")
b2 = tf.Variable(tf.zeros([10]), name="Bias2")
b4 = tf.Variable(tf.zeros([10]), name="Bias4")
b5 = tf.Variable(tf.zeros([10]), name="Bias5")
b6 = tf.Variable(tf.zeros([10]), name="Bias6")
b7 = tf.Variable(tf.zeros([10]), name="Bias7")
b8 = tf.Variable(tf.zeros([1]), name="Bias8")
L2 = tf.nn.relu(tf.matmul(X, w1) + b1)
L3 = tf.nn.relu(tf.matmul(L2, w2) + b2)
L4 = tf.nn.relu(tf.matmul(L3, w3) + b3)
L5 = tf.nn.relu(tf.matmul(L4, w4) + b4)
L6 = tf.nn.relu(tf.matmul(L5, w5) + b5)
L7 = tf.nn.relu(tf.matmul(L6, w6) + b6)
L8 = tf.nn.relu(tf.matmul(L7, w7) + b7)
hypothesis = tf.sigmoid(tf.matmul(L8, w8) + b8)
with tf.name_scope('cost') as scope:
    cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
    cost_summ = tf.summary.scalar("cost", cost)
with tf.name_scope('train') as scope:
    a = tf.Variable(0.1)
    optimizer = tf.train.GradientDescentOptimizer(a)
    train = optimizer.minimize(cost)
w1_hist = tf.summary.histogram("weights1", w1)
w2_hist = tf.summary.histogram("weights2", w2)
b1_hist = tf.summary.histogram("biases1", b1)
b2_hist = tf.summary.histogram("biases2", b2)
y_hist = tf.summary.histogram("y", Y)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("./logs/xor_logs", sess.graph)
    for step in range(20000):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
            writer.add_summary(summary, step)
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(w1), sess.run(w2))
    correct_prediction = tf.equal(tf.floor(hypothesis + 0.5), Y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print(sess.run([hypothesis, tf.floor(hypothesis + 0.5), correct_prediction], feed_dict={X: x_data, Y: y_data}))
    print("accuracy", accuracy.eval({X: x_data, Y: y_data}))
ReLU activation pattern: L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
# Xavier initialization
# Glorot et al. 2010
W = np.random.randn(fan_in, fan_out) / np.sqrt(fan_in)
# He et al. 2015
W = np.random.randn(fan_in, fan_out) / np.sqrt(fan_in / 2)
import math

def xavier_init(n_inputs, n_outputs, uniform=True):
    """Set the parameter initialization using the method described.

    This method is designed to keep the scale of the gradients roughly the same
    in all layers.

    Xavier Glorot and Yoshua Bengio (2010):
        Understanding the difficulty of training deep feedforward neural
        networks. International conference on artificial intelligence and
        statistics.

    Args:
        n_inputs: The number of input nodes into each output.
        n_outputs: The number of output nodes for each input.
        uniform: If true use a uniform distribution, otherwise use a normal.

    Returns:
        An initializer.
    """
    if uniform:
        # 6 was used in the paper.
        init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
        return tf.random_uniform_initializer(-init_range, init_range)
    else:
        # 3 gives us approximately the same limits as above since this repicks
        # values greater than 2 standard deviations from the mean.
        stddev = math.sqrt(3.0 / (n_inputs + n_outputs))
        return tf.truncated_normal_initializer(stddev=stddev)
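A usage sketch (not from the original notes; the variable name and the layer shape [784, 256] are illustrative): pass the returned initializer to tf.get_variable instead of sampling weights with tf.random_uniform directly.

# Sketch: create a weight matrix with the Xavier initializer defined above.
W1 = tf.get_variable("W1", shape=[784, 256], initializer=xavier_init(784, 256))
b1 = tf.Variable(tf.zeros([256]))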
Dropout: A Simple Way to Prevent Neural Networks from Overfitting [Srivastava et al. 2014]
Regularization: Dropout “randomly set some neurons to zero in the forward pass”
dropout_rate = tf.placeholder("float")  # in TF 1.x this feeds tf.nn.dropout's keep probability
_L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), B1))
L1 = tf.nn.dropout(_L1, dropout_rate)
- TRAIN:
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7})
- EVALUATION:
print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels, dropout_rate: 1}))
Sample code (logistic regression):
h = tf.matmul(W, X)
hypothesis = tf.div(1., 1. + tf.exp(-h))
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
a = tf.Variable(0.1) # learning rate, alpha
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost) # goal is minimize cost
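For reference, the same cost can be computed with TensorFlow's built-in op, which is numerically more stable than writing the sigmoid and logs by hand; this sketch assumes W, X, and Y keep the shapes used above.

logits = tf.matmul(W, X)
hypothesis = tf.sigmoid(logits)   # same model as above
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=logits))
train = tf.train.GradientDescentOptimizer(0.1).minimize(cost)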
train-labels-idx1-ubyte.gz : training set labels (28881 bytes)
t10k-labels-idx1-ubyte.gz : test set labels (4542 bytes)
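These .gz files do not have to be parsed by hand; the TF 1.x tutorial helper downloads and unpacks them. A sketch ("MNIST_data/" is an illustrative directory name):

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)   # downloads the four .gz files
batch_xs, batch_ys = mnist.train.next_batch(100)                 # 100 images with one-hot labels
print(mnist.test.images.shape, mnist.test.labels.shape)          # (10000, 784) (10000, 10)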
To do: summarize cs231n, the basis of the CNN material.
Email encryption and decryption uses a combination of a public key and a shared (symmetric) key.
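A minimal sketch of that hybrid scheme in Python, assuming the third-party cryptography package is installed; the key size and message are illustrative. The message body is encrypted with a random shared key, and only that small key is encrypted with the recipient's public key.

from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives import hashes
from cryptography.fernet import Fernet

# Recipient's RSA key pair (the public key would normally be distributed beforehand).
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()

# 1. Encrypt the message body with a random shared (symmetric) key.
shared_key = Fernet.generate_key()
ciphertext = Fernet(shared_key).encrypt(b"mail body")

# 2. Encrypt the shared key with the recipient's public key.
oaep = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(), label=None)
encrypted_key = public_key.encrypt(shared_key, oaep)

# Recipient: recover the shared key with the private key, then decrypt the body.
recovered_key = private_key.decrypt(encrypted_key, oaep)
print(Fernet(recovered_key).decrypt(ciphertext))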
SSL (Secure Sockets Layer), TLS (Transport Layer Security)
HTTPS: HTTP over SSL/TLS.
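A minimal sketch of HTTPS as HTTP layered over TLS, using only the Python standard library; example.com is just an illustrative host.

import socket, ssl

context = ssl.create_default_context()   # verifies the server certificate
with socket.create_connection(("example.com", 443)) as raw_sock:
    with context.wrap_socket(raw_sock, server_hostname="example.com") as tls_sock:
        print("TLS version:", tls_sock.version())
        tls_sock.sendall(b"GET / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n")
        print(tls_sock.recv(200))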
WEP (Wired Equivalent Privacy), WPA (Wi-Fi Protected Access)
VPN (Virtual Private Network)
PPP: connects to a remote site over a telephone line.
Ethernet: uses UTP cable.
ARP: finds the destination's MAC address from its IP address.
The command "arp -a" also displays the entries held in the ARP cache.
In the ARP header, an operation code of 1 means a request and 2 means a reply.
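An illustrative sketch (not from the notes): pulling the operation code and addresses out of a raw IPv4-over-Ethernet ARP payload with the standard struct module.

import struct

def parse_arp(arp_payload):
    # ARP header: hardware type, protocol type, hw addr len, proto addr len, opcode
    # (all big-endian), followed by sender MAC/IP and target MAC/IP.
    hw_type, proto_type, hw_len, proto_len, opcode = struct.unpack("!HHBBH", arp_payload[:8])
    sender_ip = ".".join(str(b) for b in arp_payload[14:18])
    target_ip = ".".join(str(b) for b in arp_payload[24:28])
    kind = {1: "request", 2: "reply"}.get(opcode, "other")   # opcode 1 = request, 2 = reply
    return kind, sender_ip, target_ip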
The SGDK330B was newly added to the analyzer lineup (June 2015).
The SGDK330B Main POD has 1 GB of memory on board, so it has the following functions.
The architecture of the SGDK330B is the same as that of the SGDK330A.
All Mini PODs can be used with both the SGDK330A and the SGDK330B.
The differences between the SGDK330A and the SGDK330B are the items related to the 1 GB of on-board memory.
Case where all data lines (DATA[7:0]) are connected
Case where only DATA[0] is connected