import tensorflow as tf
import numpy as np

# Given you have a Keras model: model = Sequential(); model.add(...); model.add(...)
# Tensors: n-dimensional arrays of numbers, e.g. a rank-2 tensor is a 2-D array
# Constants: simple, non-trainable tensors
# TensorFlow uses graphs, where edges are tensors and nodes are operations
# (see the graph-tracing sketch at the end of this file)

numpy_array = tensor_variable.numpy() # Convert a tensor to a numpy array (tensor_variable is a placeholder)

d0 = tf.ones(())        # 0D tensor (a single number; note tf.ones((1,)) would be a length-1 1D tensor)
d1 = tf.ones((2,))      # 1D tensor
d2 = tf.ones((2, 2))    # 2D tensor
d3 = tf.ones((2, 2, 2)) # 3D tensor
print(d3.numpy())       # Printing tensors

col_x = tf.cast(df['col_x'], tf.bool) # Cast a dataframe column to a TensorFlow boolean tensor
col_x = np.array(df['col_x'], bool)   # Alternative way (np.bool is removed in recent numpy; use bool or np.bool_)
col_x = tf.Variable(col_x)

a = tf.constant(3, shape=[2, 3])            # Define a 2x3 constant
b = tf.constant([1, 2, 3, 4], shape=[2, 2]) # Define a 2x2 constant
# Other creation functions: tf.zeros([2, 2]), tf.zeros_like(input_tensor), tf.ones_like(input_tensor), tf.fill([3, 3], 7)
# (a quick demo of these appears at the end of this file)

# Computing with tensors and variables
a0 = tf.Variable([1, 2, 3, 4, 5, 6], dtype=tf.float32) # Define a float variable
a1 = tf.Variable([1, 2, 3, 4, 5, 6], dtype=tf.int16)   # Define an int variable
b = tf.constant(2, tf.float32)                         # Define a constant
c0 = tf.multiply(a0, b)                                # Compute their elementwise product
c1 = a0 * b                                            # Same product via operator overloading

# Define 0-dimensional tensors (scalars)
A0 = tf.constant(1)
B0 = tf.constant(2)
# Define 1-dimensional tensors (vectors)
A1 = tf.constant([1, 2])
B1 = tf.constant([3, 4])
# Define 2-dimensional tensors (matrices)
A2 = tf.constant([[1, 2], [3, 4]])
B2 = tf.constant([[5, 6], [7, 8]])

C0 = tf.add(A0, B0) # Scalar addition, result is the scalar 3
C1 = tf.add(A1, B1) # Vector addition, result is the vector [4, 6]
C2 = tf.add(A2, B2) # Matrix addition, result is the matrix [[6, 8], [10, 12]]

A = tf.ones([2, 3, 4])   # Define a 2x3x4 tensor of ones
S = tf.reduce_sum(A)     # Sum over all dimensions (24)
S0 = tf.reduce_sum(A, 0) # Sum over dimension 0 (a 3x4 matrix of 2s)
S1 = tf.reduce_sum(A, 1) # Sum over dimension 1 (a 2x4 matrix of 3s)
S2 = tf.reduce_sum(A, 2) # Sum over dimension 2 (a 2x3 matrix of 4s)
# Other operations: tf.matmul(), tf.multiply(), tf.reshape(), and the tf.random module
# (matmul vs. multiply is demoed at the end of this file)

gray = tf.random.uniform([2, 2], maxval=255, dtype='int32') # Matrix of random values with max 255 (like a grayscale image)
gray = tf.reshape(gray, [2 * 2, 1])                         # Reshape the grayscale image from 2x2 to 4x1

# Taking derivatives (gradients) with TensorFlow
x = tf.Variable(-1.0)           # Define the value of x
with tf.GradientTape() as tape: # Define y within an instance of GradientTape
    tape.watch(x)               # Watch x (automatic for Variables; required for plain tensors)
    y = tf.multiply(x, x)       # y is the function x*x
g = tape.gradient(y, x)         # Evaluate the gradient dy/dx at x = -1
print(g.numpy())                # Shows -2.0

loss = tf.keras.losses.mse(targets, predictions) # Compute the MSE loss (true targets first, then predictions)
opt = tf.keras.optimizers.Adam()                 # Define an optimization operation
# See TENSORFLOW MODEL (a full training-step sketch is at the end of this file)

# Constructing a simple dense layer with an activation function
inputs = tf.constant([[1.0, 35.0]])       # Define inputs (features) as floats so matmul dtypes match the weights
weights = tf.Variable([[-0.05], [-0.01]]) # Define weights
bias = tf.Variable([0.5])                 # Define the bias
product = tf.matmul(inputs, weights)      # Forward propagation step: inputs times weights (X @ w)
dense = tf.keras.activations.sigmoid(product + bias) # Forward propagation step: sigmoid(X @ w + b)
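
# Sketch: the manual dense computation above should match tf.keras.layers.Dense
# when the layer is given the same weights (a minimal check, assuming TF 2.x).
layer = tf.keras.layers.Dense(1, activation='sigmoid')
layer.build((None, 2))                             # 2 input features, 1 unit
layer.set_weights([weights.numpy(), bias.numpy()]) # Reuse the manual kernel and bias
print(layer(inputs).numpy())                       # Same output as `dense` above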
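
# Sketch: TensorFlow traces Python functions into graphs with @tf.function;
# each op becomes a node and each tensor an edge (the function name is illustrative).
@tf.function
def affine(x, w, b):
    return tf.matmul(x, w) + b # Traced into a graph on the first call, then run as a graph

print(affine(tf.ones((1, 2)), tf.ones((2, 1)), tf.zeros((1,))).numpy())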
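
# Quick demo of the other creation functions listed above.
print(tf.zeros([2, 2]).numpy())   # 2x2 matrix of zeros
print(tf.zeros_like(d2).numpy())  # Zeros matching d2's shape and dtype
print(tf.ones_like(d2).numpy())   # Ones matching d2's shape and dtype
print(tf.fill([3, 3], 7).numpy()) # 3x3 matrix filled with 7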
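
# Quick demo: tf.multiply() is elementwise, tf.matmul() is the matrix product
# (values here are illustrative).
M = tf.constant([[1.0, 2.0], [3.0, 4.0]])
N = tf.constant([[5.0, 6.0], [7.0, 8.0]])
print(tf.multiply(M, N).numpy()) # [[ 5. 12.] [21. 32.]]
print(tf.matmul(M, N).numpy())   # [[19. 22.] [43. 50.]]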
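
# Sketch: how the loss, optimizer, and GradientTape fit together in one training
# step; the toy data and single-weight linear model are illustrative assumptions.
features = tf.constant([[1.0], [2.0], [3.0]])
targets = tf.constant([[2.0], [4.0], [6.0]])
w = tf.Variable([[0.5]])
opt = tf.keras.optimizers.Adam(learning_rate=0.1)
for _ in range(200):
    with tf.GradientTape() as tape:
        predictions = tf.matmul(features, w)
        loss = tf.reduce_mean(tf.keras.losses.mse(targets, predictions))
    grads = tape.gradient(loss, [w])   # Gradient of the loss w.r.t. the weight
    opt.apply_gradients(zip(grads, [w]))
print(w.numpy()) # Converges toward 2.0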