pip install tensorflow
# TensorFlow 1.x code:
import tensorflow as tf

a = tf.constant(2)
b = tf.constant(3)

sess = tf.Session()
print(sess.run(a + b))
sess.close()
import tensorflow as tf

print(tf.__version__)
from tensorflow.python.client import device_lib

print(device_lib.list_local_devices())
# Import TensorFlow library
import tensorflow as tf

# Print TensorFlow version
print("TensorFlow version:", tf.__version__)

# Create a constant tensor containing a string
hello = tf.constant("Hello, TensorFlow!")

# In TensorFlow 2.x, eager execution is enabled by default,
# so we can print the value of the tensor directly
print(hello.numpy().decode())  # Output: Hello, TensorFlow!
import tensorflow as tf

# Create a 2D tensor
tensor = tf.constant([[1, 2], [3, 4]])

# Display tensor properties
print("Rank (number of dimensions):", tf.rank(tensor).numpy())
print("Shape (dimensions):", tensor.shape)
print("Data type:", tensor.dtype)
# Create tensors
a = tf.constant([1, 2, 3])
b = tf.ones([2, 2])
c = tf.zeros([2, 3])
d = tf.random.uniform([2, 2], minval=0, maxval=10)

# Output tensors
print("Constant:", a)
print("Ones:", b)
print("Zeros:", c)
print("Random Uniform:", d)
# Define a constant and a variable
const_tensor = tf.constant([5, 6])
var_tensor = tf.Variable([1.0, 2.0])

# Modify the variable
var_tensor.assign_add([1.0, 1.0])

print("Constant Tensor:", const_tensor)
print("Modified Variable Tensor:", var_tensor)
x = tf.constant([2.0, 3.0])
y = tf.constant([1.0, 4.0])

print("Add:", tf.add(x, y))
print("Multiply:", tf.multiply(x, y))
print("Power:", tf.pow(x, 2))
# Reshape a tensor
original = tf.constant([[1, 2], [3, 4], [5, 6]])
reshaped = tf.reshape(original, [2, 3])

# Broadcast
a = tf.constant([[1], [2], [3]])  # shape (3, 1)
b = tf.constant([4, 5])           # shape (2,)
result = a + b                    # broadcasting shapes to (3, 2)

print("Reshaped Tensor:\n", reshaped)
print("Broadcasted Result:\n", result)
tensor = tf.constant([[10, 20], [30, 40], [50, 60]])

# Index and slice
print("First row:", tensor[0])
print("Second column:", tensor[:, 1])
print("Last two rows:\n", tensor[1:, :])
zeros_tensor = tf.zeros([2, 2])
ones_tensor = tf.ones([2, 2])
uniform_tensor = tf.random.uniform([2, 2], 0, 1)
normal_tensor = tf.random.normal([2, 2], mean=0.0, stddev=1.0)

print("Zeros:\n", zeros_tensor)
print("Ones:\n", ones_tensor)
print("Uniform Random:\n", uniform_tensor)
print("Normal Random:\n", normal_tensor)
float_tensor = tf.constant([1.5, 2.3, 3.7], dtype=tf.float32)
int_tensor = tf.cast(float_tensor, tf.int32)

print("Float Tensor:", float_tensor)
print("Converted to Int:", int_tensor)
tensor = tf.constant([[1, 2], [3, 4]])

print("Sum of all elements:", tf.reduce_sum(tensor))
print("Mean of each column:", tf.reduce_mean(tensor, axis=0))
a = tf.constant([2, 4, 6])
b = tf.constant([1, 4, 8])

print("Equal:", tf.equal(a, b))
print("Greater:", tf.greater(a, b))
print("Logical AND:", tf.logical_and(a > 2, b > 2))
x = tf.range(1000000, dtype=tf.float32)
y = x * 2.0  # Vectorized operation

print("First 5 results:", y[:5])
@tf.function
def multiply(a, b):
    return a * b

x = tf.constant([2.0, 3.0])
y = tf.constant([4.0, 5.0])

print("Graph output:", multiply(x, y))
with tf.device('/CPU:0'):
    cpu_tensor = tf.constant([1.0, 2.0])

# If GPU is available:
# with tf.device('/GPU:0'):
#     gpu_tensor = tf.constant([3.0, 4.0])

print("Tensor on CPU:", cpu_tensor)
x = tf.constant([-1, 0, 1])

# Assert values are non-negative
try:
    tf.debugging.assert_non_negative(x)
except tf.errors.InvalidArgumentError as e:
    print("Debugging Error:", e.message)
@tf.function
def compute_squared_sum(tensor):
    return tf.reduce_sum(tensor ** 2)

data = tf.constant([1.0, 2.0, 3.0])
print("Squared sum:", compute_squared_sum(data))
import tensorflow as tf

# Create a basic dataset from a list
dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5])

for item in dataset:
    print(item.numpy())
import numpy as np

array = np.array([10, 20, 30])
ds = tf.data.Dataset.from_tensor_slices(array)

for val in ds:
    print(val.numpy())
dataset = tf.data.Dataset.range(10)
dataset = dataset.shuffle(5).map(lambda x: x * 2).batch(3)

for batch in dataset:
    print(batch.numpy())
csv_ds = tf.data.experimental.make_csv_dataset(
    file_pattern='sample.csv',
    batch_size=2,
    num_epochs=1,
    ignore_errors=True
)

for row in csv_ds:
    print(row)
image_path = 'example.jpg'
image = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [128, 128])

print("Image shape:", image.shape)
text_ds = tf.data.TextLineDataset("sample.txt")

for line in text_ds.take(3):
    print(line.numpy().decode())
# Writing an example
def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[tf.io.encode_base64(value).numpy()]))

example = tf.train.Example(features=tf.train.Features(feature={
    'feature_name': _bytes_feature(tf.constant(b"example"))
}))

# Serialize to string
serialized = example.SerializeToString()
print(serialized)
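A minimal follow-up sketch of persisting the serialized example with tf.io.TFRecordWriter and reading it back; the filename data.tfrecord is only illustrative.

# Write the serialized example to a TFRecord file (filename is an example)
with tf.io.TFRecordWriter("data.tfrecord") as writer:
    writer.write(serialized)

# Read the records back and restore the protobuf message
raw_ds = tf.data.TFRecordDataset("data.tfrecord")
for raw_record in raw_ds.take(1):
    restored = tf.train.Example()
    restored.ParseFromString(raw_record.numpy())
    print(restored)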
ds = tf.data.Dataset.range(100).batch(10).prefetch(1)

for batch in ds.take(2):
    print(batch.numpy())
image = tf.random.uniform([256, 256, 3])

# Apply augmentations
flipped = tf.image.flip_left_right(image)
bright = tf.image.random_brightness(image, max_delta=0.5)

print("Augmented image shape:", flipped.shape)
dataset = tf.data.Dataset.range(100)

train = dataset.take(60)
val = dataset.skip(60).take(20)
test = dataset.skip(80)

print("Train sample:", list(train.as_numpy_iterator())[:5])
print("Validation sample:", list(val.as_numpy_iterator())[:5])
print("Test sample:", list(test.as_numpy_iterator())[:5])
large_ds = tf.data.TFRecordDataset("large_data.tfrecord.gz", compression_type="GZIP")
large_ds = large_ds.batch(32).prefetch(tf.data.AUTOTUNE)

print("Dataset ready for large input pipelines.")
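A minimal sketch of parsing each record in this pipeline, assuming the file contains a single bytes feature named 'feature_name' (as in the writing example above); adjust the feature spec to match your actual schema.

feature_spec = {
    'feature_name': tf.io.FixedLenFeature([], tf.string)
}

def parse_record(serialized_example):
    # Decode one serialized tf.train.Example into a dict of tensors
    return tf.io.parse_single_example(serialized_example, feature_spec)

parsed_ds = tf.data.TFRecordDataset("large_data.tfrecord.gz", compression_type="GZIP")
parsed_ds = parsed_ds.map(parse_record, num_parallel_calls=tf.data.AUTOTUNE)
parsed_ds = parsed_ds.batch(32).prefetch(tf.data.AUTOTUNE)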
def custom_gen():
    for i in range(5):
        yield i * 2

custom_ds = tf.data.Dataset.from_generator(custom_gen, output_types=tf.int32)

for item in custom_ds:
    print(item.numpy())
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ds = tf.data.Dataset.from_tensor_slices(dict(df))

for item in ds:
    print(item)
import time

def stream():
    for i in range(3):
        yield i
        time.sleep(1)

stream_ds = tf.data.Dataset.from_generator(stream, output_types=tf.int32)

for item in stream_ds:
    print(item.numpy())
dataset = tf.data.Dataset.range(1000)
dataset = dataset.cache().shuffle(100).batch(64).prefetch(tf.data.AUTOTUNE)

print("Optimized input pipeline built.")
import tensorflow as tf

# Define a basic model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, input_shape=(3,), activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.summary()
# One neuron layer example
layer = tf.keras.layers.Dense(1, activation='relu', input_shape=(2,))
output = layer(tf.constant([[1.0, 2.0]]))

print("Output:", output.numpy())
x = tf.constant([-1.0, 0.0, 1.0])

print("ReLU:", tf.nn.relu(x).numpy())
print("Sigmoid:", tf.nn.sigmoid(x).numpy())
print("Tanh:", tf.nn.tanh(x).numpy())
x = tf.constant([[1.0]])
y = tf.constant([[0.0]])

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
loss_fn = tf.keras.losses.MSE

with tf.GradientTape() as tape:
    prediction = model(x)
    loss = loss_fn(y, prediction)

grads = tape.gradient(loss, model.trainable_variables)
print("Gradients:", grads)
y_true = tf.constant([1.0, 0.0])
y_pred = tf.constant([0.8, 0.2])

mse = tf.keras.losses.MSE(y_true, y_pred)
bce = tf.keras.losses.BinaryCrossentropy()(y_true, y_pred)

print("MSE:", mse.numpy())
print("Binary Crossentropy:", bce.numpy())
opt1 = tf.keras.optimizers.SGD()
opt2 = tf.keras.optimizers.Adam()
opt3 = tf.keras.optimizers.RMSprop()

print("Available optimizers created.")
# Detect overfitting by comparing train/val accuracy trends
print("Detect using validation metrics; use dropout and regularization to prevent overfitting.")
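A minimal sketch of that check, assuming a model compiled with metrics=['accuracy'] and a history object returned by model.fit(..., validation_split=0.2); a widening gap between the two curves signals overfitting.

import matplotlib.pyplot as plt

# Assumes: history = model.fit(x_train, y_train, validation_split=0.2, epochs=...)
train_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

plt.plot(train_acc, label='train accuracy')
plt.plot(val_acc, label='validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# A growing gap between the curves indicates overfitting
print("Final train/val accuracy gap:", train_acc[-1] - val_acc[-1])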
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, kernel_regularizer=tf.keras.regularizers.l2(0.01)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1)
])

print("Model with L2 regularization and dropout.")
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation('relu')
])

print("Model with batch normalization.")
# Train with batch size and epochs
model.compile(optimizer='adam', loss='mse')
model.fit(tf.random.normal([100, 3]), tf.random.normal([100, 1]), epochs=2, batch_size=10)
import matplotlib.pyplot as plt

loss_history = []

# Dummy training
for step in range(10):
    loss = 1 / (step + 1)
    loss_history.append(loss)

plt.plot(loss_history)
plt.title("Gradient Descent Loss Curve")
plt.xlabel("Step")
plt.ylabel("Loss")
plt.show()
y_true = [1, 0, 1, 1]
y_pred = [1, 0, 0, 1]

acc = tf.keras.metrics.BinaryAccuracy()
acc.update_state(y_true, y_pred)

print("Accuracy:", acc.result().numpy())
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.1,
    decay_steps=100,
    decay_rate=0.96
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

print("Learning rate schedule created.")
# Binary classification model
binary_model = tf.keras.Sequential([
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Multiclass model
multi_model = tf.keras.Sequential([
    tf.keras.layers.Dense(3, activation='softmax')
])

print("Defined binary and multiclass models.")
mlp = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
mlp.compile(optimizer='adam', loss='binary_crossentropy')

print("MLP model defined.")
import tensorflow as tf print("Keras version:", tf.keras.__version__)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(64, activation='relu')(inputs)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)

model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.summary()
input_layer = tf.keras.Input(shape=(20,))
hidden_layer = tf.keras.layers.Dense(10, activation='relu')(input_layer)
output_layer = tf.keras.layers.Dense(3, activation='softmax')(hidden_layer)

model = tf.keras.Model(inputs=input_layer, outputs=output_layer)
model.summary()
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)
import numpy as np

x_train = np.random.random((100, 20))
y_train = np.random.randint(3, size=(100,))

model.fit(x_train, y_train, epochs=5, batch_size=16)
x_test = np.random.random((20, 20))
y_test = np.random.randint(3, size=(20,))

loss, accuracy = model.evaluate(x_test, y_test)
print("Test loss:", loss)
print("Test accuracy:", accuracy)
new_data = np.random.random((3, 20))
predictions = model.predict(new_data)

print("Predictions:\n", predictions)
model.save('my_model.h5')  # Save entire model

# Load model later
loaded_model = tf.keras.models.load_model('my_model.h5')
print("Model loaded successfully.")
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint("best_model.h5", save_best_only=True)
earlystop_cb = tf.keras.callbacks.EarlyStopping(patience=3)

model.fit(
    x_train, y_train,
    epochs=20,
    validation_split=0.2,
    callbacks=[checkpoint_cb, earlystop_cb]
)
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',
    patience=2,
    restore_best_weights=True
)

model.fit(x_train, y_train, epochs=50, validation_split=0.2, callbacks=[early_stop])
tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir='./logs')
model.fit(x_train, y_train, epochs=5, callbacks=[tensorboard_cb])

# Run `tensorboard --logdir=./logs` in terminal to view
def custom_mse(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_true - y_pred))

model.compile(optimizer='adam', loss=custom_mse)
print("Model compiled with custom loss function.")
base_model = tf.keras.applications.MobileNetV2(
    input_shape=(224, 224, 3),
    include_top=False,
    weights='imagenet'
)
base_model.trainable = False

model = tf.keras.Sequential([
    base_model,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

print("Transfer learning model created.")
# Always start with:
model.summary()

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Use validation data and callbacks
model.fit(x_train, y_train, validation_split=0.2, epochs=10, callbacks=[earlystop_cb])
# TensorFlow supports many computer vision tasks like classification, detection, and segmentation.
print("TensorFlow Computer Vision Capabilities Ready")
# Example: Simple image classification model architecture
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.summary()
import tensorflow as tf

def preprocess(image):
    image = tf.image.resize(image, [28, 28])
    image = image / 255.0  # normalize pixel values
    return image

sample_image = tf.random.uniform([100, 100, 3])
processed_image = preprocess(sample_image)

print("Processed image shape:", processed_image.shape)
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.summary()
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.summary()
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(64, 3, activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.summary()
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.RandomFlip('horizontal'),
    tf.keras.layers.RandomRotation(0.1),
    tf.keras.layers.RandomZoom(0.1)
])

sample_image = tf.expand_dims(processed_image, 0)  # add batch dimension
augmented_image = data_augmentation(sample_image)

print("Augmented image shape:", augmented_image.shape)
base_model = tf.keras.applications.VGG16(
    input_shape=(224, 224, 3),
    include_top=False,
    weights='imagenet'
)
base_model.trainable = False

print("Loaded VGG16 base model")
base_model.trainable = True

for layer in base_model.layers[:-5]:
    layer.trainable = False

print("Fine-tuning last 5 layers")
# TensorFlow supports models like SSD and Faster R-CNN for object detection.
print("Object detection models available via the TensorFlow Model Zoo")
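A minimal sketch of loading a pre-trained detector from TensorFlow Hub; the model handle and the output dictionary keys are assumptions based on the TF2 Detection Zoo, so check the model page on tfhub.dev before relying on them.

import tensorflow as tf
import tensorflow_hub as hub

# Model handle assumed from the TF2 Detection Zoo on TensorFlow Hub
detector = hub.load("https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2")

# Detection models from the zoo take a batch of uint8 images
image = tf.zeros([1, 320, 320, 3], dtype=tf.uint8)  # placeholder input
results = detector(image)

print("Output keys:", list(results.keys()))
print("Boxes shape:", results["detection_boxes"].shape)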
import tensorflow_hub as hub model = hub.KerasLayer("https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/4") print("Loaded MobileNetV2 from TensorFlow Hub")
# Segmentation uses models like U-Net or DeepLab
print("Use segmentation models for pixel-level classification")
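A minimal U-Net-style encoder-decoder sketch for pixel-level classification (not a full U-Net); the input size, filter counts, and number of classes are illustrative assumptions.

import tensorflow as tf

NUM_CLASSES = 3  # illustrative
inputs = tf.keras.Input(shape=(128, 128, 3))

# Encoder: downsample while increasing channels
c1 = tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu')(inputs)
p1 = tf.keras.layers.MaxPooling2D()(c1)
c2 = tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu')(p1)

# Decoder: upsample back to the input resolution and merge the skip connection
u1 = tf.keras.layers.UpSampling2D()(c2)
merged = tf.keras.layers.Concatenate()([u1, c1])
c3 = tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu')(merged)

# One softmax prediction per pixel
outputs = tf.keras.layers.Conv2D(NUM_CLASSES, 1, activation='softmax')(c3)

seg_model = tf.keras.Model(inputs, outputs)
seg_model.summary()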
inputs = tf.keras.Input(shape=(64, 64, 3))
x = tf.keras.layers.Conv2D(32, 3, activation='relu')(inputs)
x = tf.keras.layers.MaxPooling2D()(x)
x = tf.keras.layers.Conv2D(64, 3, activation='relu')(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
outputs = tf.keras.layers.Dense(10, activation='softmax')(x)

custom_cnn = tf.keras.Model(inputs, outputs)
custom_cnn.summary()
import matplotlib.pyplot as plt

layer_output = custom_cnn.layers[1].output  # First Conv layer output
print("Use TensorBoard or matplotlib to visualize activations and filters")
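A minimal sketch of one way to do that with matplotlib, assuming custom_cnn from the previous snippet; the random input is a stand-in for a real image.

import tensorflow as tf
import matplotlib.pyplot as plt

# Model mapping the input to the first convolutional layer's activations
activation_model = tf.keras.Model(inputs=custom_cnn.input,
                                  outputs=custom_cnn.layers[1].output)

sample = tf.random.uniform([1, 64, 64, 3])  # placeholder image
activations = activation_model(sample)      # shape (1, 62, 62, 32)

# Plot the first 8 feature maps
fig, axes = plt.subplots(1, 8, figsize=(16, 2))
for i, ax in enumerate(axes):
    ax.imshow(activations[0, :, :, i], cmap='viridis')
    ax.axis('off')
plt.show()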
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[..., tf.newaxis] / 255.0
x_test = x_test[..., tf.newaxis] / 255.0

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))