# Linear regression model using deep learning (TensorFlow)
# linear_regression.py
import tensorflow as tf
# Training data: eight (x, y) observations to fit with y ~= W * x + b.
x_data = [32.0, 64.0, 96.0, 118.0, 126.0, 144.0, 152.5, 158.0]
y_data = [18.0, 24.0, 61.5, 49.0, 52.0, 105.0, 130.3, 125.0]

# Model parameters, randomly initialized (TF 1.x graph API).
# NOTE(review): tf.random_normal(shape, mean, stddev) takes a mean and a
# standard deviation — the original "# -1 ~ 1" comments were wrong. These
# draw from N(-10, 10^2) and N(-100, 100^2) respectively; if a bounded
# range was intended, tf.random_uniform would be the right call.
W = tf.Variable(tf.random_normal([1], -10.0, 10.0))
b = tf.Variable(tf.random_normal([1], -100.0, 100.0))

# Hypothesis (model prediction) and mean-squared-error cost.
hypothesis = W * x_data + b
cost = tf.reduce_mean(tf.square(hypothesis - y_data))

# Gradient descent on the cost. The learning rate is a plain constant:
# the original wrapped it in tf.Variable, which needlessly registered it
# as a trainable variable.
rate = 0.00001  # learning rate (alpha)
optimizer = tf.train.GradientDescentOptimizer(rate)
train = optimizer.minimize(cost)

# Initialize all variables before training.
init = tf.global_variables_initializer()

# Launch the graph; try/finally guarantees the session is released even
# if training is interrupted.
sess = tf.Session()
try:
    sess.run(init)
    # Fit the line, logging cost/W/b every 20,000 steps.
    for step in range(2000001):
        sess.run(train)
        if step % 20000 == 0:
            print('{:4} {} {} {}'.format(step, sess.run(cost), sess.run(W), sess.run(b)))
finally:
    sess.close()  # learns best fit near W: [0.87189066], b: [-26.40464592]