A three-layer neural network implemented in TensorFlow

    xiaoxiao · 2025-02-03

    # -*- coding: utf-8 -*-
    """
    Created on Sat Aug 13 16:38:38 2016
    A small feed-forward neural network written by myself, today!
    @author: root
    """
    import tensorflow as tf
    import numpy as np

    def add_layer(inputs, input_feature_size, output_feature_size, activation_function=None):
        """Add a fully connected layer.

        inputs              : input tensor x
        input_feature_size  : input feature size
        output_feature_size : output feature size
        activation_function : activation function (None for a linear layer)
        """
        Weights = tf.Variable(tf.random_normal([input_feature_size, output_feature_size]))
        bias = tf.Variable(tf.zeros([1, output_feature_size]) + 0.1)

        Wx_plus_bias = tf.matmul(inputs, Weights) + bias

        if activation_function is not None:
            outputs = activation_function(Wx_plus_bias)
        else:
            outputs = Wx_plus_bias

        return outputs

    # x_data shape: (300, 1)
    x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]
    # noise with the same shape as x_data
    noise = np.random.normal(0, 0.05, x_data.shape)
    y_data = np.square(x_data) - 0.5 + noise

    # define two placeholders
    x_holder = tf.placeholder(dtype=tf.float32, shape=[None, 1])
    y_holder = tf.placeholder(dtype=tf.float32, shape=[None, 1])

    # network: input layer -> hidden layer -> output layer
    #   input layer : (n, 1)
    #   hidden layer: (n, 1)  ==> (n, 10)
    #   output layer: (n, 10) ==> (n, 1)
    l1 = add_layer(x_holder, 1, 10, activation_function=tf.nn.relu)
    l2 = add_layer(l1, 10, 1, activation_function=None)

    # define the loss: per-sample absolute error, averaged over the batch
    loss = tf.reduce_mean(tf.reduce_sum(tf.abs(y_holder - l2), 1))
    # define the training step
    train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # initialize_all_variables() is deprecated; global_variables_initializer() replaces it
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for i in range(2000):
            sess.run(train, feed_dict={x_holder: x_data, y_holder: y_data})
            if i % 50 == 0:
                print(sess.run(loss, feed_dict={x_holder: x_data, y_holder: y_data}))
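For reference, each call to add_layer computes outputs = activation(inputs · Weights + bias). Below is a minimal NumPy sketch of the same forward computation for the hidden layer; the weight and bias values are made up for illustration and are not taken from the trained model:

    import numpy as np

    def forward(inputs, weights, bias, activation=None):
        # same computation as add_layer above: inputs @ weights + bias, then optional activation
        z = inputs @ weights + bias
        return activation(z) if activation is not None else z

    x = np.array([[0.5], [-0.2]], dtype=np.float32)   # two samples, one feature each
    W = np.full((1, 10), 0.1, dtype=np.float32)       # illustrative weights (random_normal above)
    b = np.full((1, 10), 0.1, dtype=np.float32)       # illustrative bias (zeros + 0.1 above)
    hidden = forward(x, W, b, activation=lambda z: np.maximum(z, 0))  # ReLU, as in layer l1
    print(hidden.shape)  # (2, 10): each sample mapped to 10 hidden units

The ReLU in the hidden layer is what lets the network fit the quadratic target y = x² − 0.5; with activation_function=None on both layers, the two matrix multiplications would collapse into a single linear map.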
    Please credit the original source when reposting: https://ju.6miu.com/read-1296056.html