# Simple deep learning example using Keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(100,)))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy')
# Define a simple feedforward neural network in PyTorch
import torch
import torch.nn as nn
class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(100, 64)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# Backpropagation example using PyTorch
import torch
import torch.nn as nn
model = nn.Linear(10, 1)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
inputs = torch.randn(5, 10)
targets = torch.randn(5, 1)
optimizer.zero_grad()  # clear any previously accumulated gradients
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()   # compute gradients
optimizer.step()  # update weights
# Using activation functions in TensorFlow Keras
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.models import Sequential
model = Sequential()
model.add(Dense(64, input_shape=(100,)))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# Example: Dropout layer in Keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(100,)))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# Simple CNN example with Keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28,28,1)))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
# Simple RNN example in Keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
model = Sequential()
model.add(SimpleRNN(50, input_shape=(10, 1)))
model.add(Dense(1))
# Using ReLU helps mitigate vanishing gradients
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
model = Sequential()
model.add(Dense(64, input_shape=(100,)))
model.add(Activation('relu'))
model.add(Dense(10, activation='softmax'))
# Applying dropout in Keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(100,)))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# Batch normalization example in Keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization, Dense
model = Sequential()
model.add(Dense(64, input_shape=(100,)))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))
# Transfer learning example with Keras
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224,224,3))
model = Sequential()
model.add(base_model)
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
# Example: Autoencoder for unsupervised learning in Keras
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
input_layer = Input(shape=(100,))
encoded = Dense(32, activation='relu')(input_layer)
decoded = Dense(100, activation='sigmoid')(encoded)
autoencoder = Model(input_layer, decoded)
autoencoder.compile(optimizer='adam', loss='mse')
# Simple autoencoder in Keras
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
input_layer = Input(shape=(784,))
encoded = Dense(64, activation='relu')(input_layer)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_layer, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# Gradient descent optimization in PyTorch
import torch
import torch.nn as nn
model = nn.Linear(10, 1)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
inputs = torch.randn(5, 10)
targets = torch.randn(5, 1)
optimizer.zero_grad()  # clear any previously accumulated gradients
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# Example of using loss functions in Keras
from tensorflow.keras.losses import SparseCategoricalCrossentropy
model.compile(optimizer='adam', loss=SparseCategoricalCrossentropy())
# Early stopping callback in Keras
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=3)
model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, callbacks=[early_stop])
# Using Adam optimizer in Keras
model.compile(optimizer='adam', loss='categorical_crossentropy')
# Example: SGD optimizer in PyTorch
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# Training progress is typically visualized with tools like matplotlib (minimal sketch below)
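# A minimal sketch of plotting training curves, assuming `history = model.fit(...)` was run with validation data
import matplotlib.pyplot as plt
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()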
# GAN skeleton code in PyTorch (simplified)
import torch
import torch.nn as nn
class Generator(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(100, 784)

    def forward(self, x):
        return torch.sigmoid(self.fc(x))

class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(784, 1)

    def forward(self, x):
        return torch.sigmoid(self.fc(x))
# Example: Using pretrained ResNet in PyTorch
import torchvision.models as models
resnet = models.resnet18(pretrained=True)
# Freeze layers
for param in resnet.parameters():
    param.requires_grad = False
# Replace last layer for new task
import torch.nn as nn
resnet.fc = nn.Linear(resnet.fc.in_features, 10) # 10 classes
# Dropout example in Keras
from tensorflow.keras.layers import Dropout
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5)) # 50% dropout rate
# Batch normalization in PyTorch
import torch.nn as nn
bn = nn.BatchNorm1d(num_features=128)
# Apply after linear layer
# Using ReLU activation helps mitigate vanishing gradients
import torch.nn.functional as F
x = F.relu(x)
# CNN layer example in Keras
from tensorflow.keras.layers import Conv2D
model.add(Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)))
# Simple RNN in Keras
from tensorflow.keras.layers import SimpleRNN
model.add(SimpleRNN(50, input_shape=(timesteps, features)))
# LSTM layer in Keras
from tensorflow.keras.layers import LSTM
model.add(LSTM(100, input_shape=(timesteps, features)))
# Using HuggingFace Transformer model
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = AutoModel.from_pretrained('bert-base-uncased')
# Simplified self-attention computation
import torch
query = torch.rand(1,5,64) # batch, seq_len, dim
key = torch.rand(1,5,64)
value = torch.rand(1,5,64)
scores = torch.matmul(query, key.transpose(-2,-1)) / (64**0.5)
weights = torch.nn.functional.softmax(scores, dim=-1)
output = torch.matmul(weights, value)
# Attention example pseudo-code
context_vector = sum(attention_weights * encoder_outputs)
# Fine-tuning last layers in PyTorch
for param in model.parameters():
    param.requires_grad = False
for param in model.fc.parameters():
    param.requires_grad = True
# Train only last fc layer
# Data augmentation example in Keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(rotation_range=20, horizontal_flip=True)
# Gradient clipping example in PyTorch
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
# EarlyStopping in Keras
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model.fit(X_train, y_train, validation_data=(X_val, y_val), callbacks=[early_stopping])
# Training with epochs in Keras
model.fit(X_train, y_train, epochs=10)
# Example of L2 regularization in Keras
from tensorflow.keras.regularizers import l2
model.add(Dense(64, kernel_regularizer=l2(0.01)))
# Setting learning rate in PyTorch
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# L1 regularization example in Keras
from tensorflow.keras.regularizers import l1
model.add(Dense(64, kernel_regularizer=l1(0.01)))
# Example: batch_size=32 in Keras
model.fit(X_train, y_train, batch_size=32)
# Using Adam optimizer in TensorFlow
model.compile(optimizer='adam', loss='categorical_crossentropy')
# Example loss functions
# For classification: categorical_crossentropy
# For regression: mean_squared_error
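# A minimal sketch of pairing a loss with a task (toy Keras models built only for illustration)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
clf = Sequential([Dense(10, activation='softmax', input_shape=(100,))])
clf.compile(optimizer='adam', loss='categorical_crossentropy')   # multi-class classification
reg = Sequential([Dense(1, input_shape=(100,))])
reg.compile(optimizer='adam', loss='mean_squared_error')         # regression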
# Supervised example: classification
# Unsupervised example: clustering
# Simplified RL loop pseudocode
state = env.reset()
while not done:
    action = agent.act(state)
    next_state, reward, done = env.step(action)
    agent.learn(state, action, reward, next_state)
    state = next_state
# Deep learning example: neural networks
# ML example: decision trees
# AI includes rule-based systems too
# Dimensionality reduction methods:
# PCA, t-SNE, UMAP
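# A minimal PCA sketch with scikit-learn (X is a hypothetical feature matrix)
import numpy as np
from sklearn.decomposition import PCA
X = np.random.rand(200, 50)                    # 200 samples, 50 features
X_2d = PCA(n_components=2).fit_transform(X)    # project onto the top 2 principal components
print(X_2d.shape)                              # (200, 2)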
# Autoencoder example in Keras
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
input_img = Input(shape=(784,))
encoded = Dense(64, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)
# GAN simplified flow pseudocode
# Generator creates fake samples
# Discriminator tries to distinguish real vs fake
# Policy example pseudocode
def policy(state):
    return action
# Pseudocode for backpropagation
loss.backward()
optimizer.step()
# Confusion matrix example using sklearn
from sklearn.metrics import confusion_matrix
y_true = [0, 1, 0, 1]
y_pred = [0, 0, 0, 1]
cm = confusion_matrix(y_true, y_pred)
print(cm)
# Example: High train accuracy but low test accuracy
# Example of dropout in Keras
from tensorflow.keras.layers import Dropout
model.add(Dropout(0.5))
# Example: low train and test accuracy
# L2 regularization example in scikit-learn
from sklearn.linear_model import Ridge
model = Ridge(alpha=1.0)
# K-Fold CV in scikit-learn
from sklearn.model_selection import KFold
kf = KFold(n_splits=5)
# Model complexity affects bias and variance
# Example: ReLU in TensorFlow
from tensorflow.keras.layers import ReLU
layer = ReLU()
# ReLU helps mitigate vanishing gradients compared to Sigmoid
# Dropout example in Keras
from tensorflow.keras.layers import Dropout
model.add(Dropout(0.5))
# BatchNorm example in TensorFlow
from tensorflow.keras.layers import BatchNormalization
model.add(BatchNormalization())
# Simple CNN layer example in Keras
from tensorflow.keras.layers import Conv2D
model.add(Conv2D(32, (3,3), activation='relu'))
# MaxPooling example in Keras
from tensorflow.keras.layers import MaxPooling2D
model.add(MaxPooling2D(pool_size=(2, 2)))
# Simple RNN layer example in Keras
from tensorflow.keras.layers import SimpleRNN
model.add(SimpleRNN(50))
# LSTM example in Keras
from tensorflow.keras.layers import LSTM
model.add(LSTM(50))
# Transformer attention pseudocode
# Attention(Q, K, V) = softmax(QK^T / sqrt(d_k)) V
# Scaled dot-product attention example
# Using pre-trained model in Keras
from tensorflow.keras.applications import VGG16
base_model = VGG16(weights='imagenet', include_top=False)
# Freeze base layers and train top layers example
for layer in base_model.layers:
    layer.trainable = False
# Example in Keras ImageDataGenerator
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(rotation_range=20, horizontal_flip=True)
# Classification example: spam detection
# Regression example: house price prediction
# Precision, recall, and F1 score in scikit-learn
from sklearn.metrics import precision_score, recall_score, f1_score
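# A minimal sketch with hypothetical labels and predictions
y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]
print(precision_score(y_true, y_pred), recall_score(y_true, y_pred), f1_score(y_true, y_pred))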
# ROC curve and AUC in scikit-learn
from sklearn.metrics import roc_curve, auc
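# A minimal ROC/AUC sketch (y_true and y_scores are hypothetical)
y_true = [0, 0, 1, 1]
y_scores = [0.1, 0.4, 0.35, 0.8]
fpr, tpr, thresholds = roc_curve(y_true, y_scores)
print(auc(fpr, tpr))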
# Simple gradient descent update step in Python (weights and gradient as NumPy arrays)
import numpy as np
weights = np.random.rand(10)         # current parameter values
gradient = np.random.rand(10)        # gradient of the loss w.r.t. the weights
learning_rate = 0.01
weights -= learning_rate * gradient  # move the parameters against the gradient
# SGD example with mini-batches
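# A minimal mini-batch SGD sketch in NumPy (X, y, and the linear model's weights are hypothetical)
import numpy as np
X = np.random.rand(100, 10)
y = np.random.rand(100, 1)
w = np.zeros((10, 1))
batch_size = 32
for start in range(0, len(X), batch_size):
    xb, yb = X[start:start + batch_size], y[start:start + batch_size]
    grad = 2 * xb.T @ (xb @ w - yb) / len(xb)   # gradient of MSE for a linear model on this batch
    w -= 0.01 * grad                            # SGD update using only the mini-batch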
# Using Adam optimizer in Keras
from tensorflow.keras.optimizers import Adam
model.compile(optimizer=Adam(), loss='categorical_crossentropy')
# EarlyStopping callback in Keras
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=3)
# Confusion matrix in scikit-learn
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)
# Simple Q-learning pseudocode
Q(s, a) = Q(s, a) + α [r + γ max_a' Q(s', a') - Q(s, a)]
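# A minimal tabular sketch of the Q-learning update above (the transition values are hypothetical placeholders)
import numpy as np
n_states, n_actions = 5, 2
Q = np.zeros((n_states, n_actions))
alpha, gamma = 0.1, 0.99
s, a, r, s_next = 0, 1, 1.0, 2                                 # one hypothetical transition
Q[s, a] += alpha * (r + gamma * Q[s_next].max() - Q[s, a])     # Q-learning update rule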
# Example: REINFORCE (policy-based) vs Q-learning (value-based)
# Example: tokenization in Python using NLTK
import nltk
tokens = nltk.word_tokenize("Hello world!")
# Example: Word2Vec using Gensim
from gensim.models import Word2Vec
sentences = [["deep", "learning", "is", "fun"], ["embeddings", "capture", "meaning"]]  # toy tokenized corpus
model = Word2Vec(sentences, vector_size=100, window=5)
# Example architecture: Encoder-Decoder
# Tokenization example using spaCy
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("Hello world!")
tokens = [token.text for token in doc]
# Named entity recognition using spaCy
for ent in doc.ents:
    print(ent.text, ent.label_)
# Example using TextBlob
from textblob import TextBlob
analysis = TextBlob("I love this!")
print(analysis.sentiment.polarity)
# Example using Gensim LDA
from gensim.models.ldamodel import LdaModel
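# A minimal LDA sketch with Gensim (toy tokenized documents; num_topics chosen arbitrarily)
from gensim.corpora import Dictionary
texts = [["cats", "dogs", "pets"], ["stocks", "market", "trading"]]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
lda = LdaModel(corpus, num_topics=2, id2word=dictionary)
print(lda.print_topics())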
# Example approach: Lesk algorithm
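# A minimal word-sense disambiguation sketch using NLTK's Lesk implementation (requires the WordNet corpus)
from nltk.wsd import lesk
sense = lesk("I went to the bank to deposit money".split(), "bank")
print(sense, sense.definition() if sense else None)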
# Example: POS tagging
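# A minimal POS-tagging sketch with NLTK (requires the punkt and averaged_perceptron_tagger resources)
import nltk
tokens = nltk.word_tokenize("The cat sat on the mat")
print(nltk.pos_tag(tokens))  # e.g. [('The', 'DT'), ('cat', 'NN'), ...]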
# Example: GPT predicts next token
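# A minimal next-token sketch with a Hugging Face GPT-2 pipeline (the 'gpt2' checkpoint is assumed to be available)
from transformers import pipeline
generator = pipeline("text-generation", model="gpt2")
print(generator("Deep learning is", max_new_tokens=5))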
# TF-IDF in scikit-learn
from sklearn.feature_extraction.text import TfidfVectorizer
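# A minimal TF-IDF sketch on a toy corpus
docs = ["deep learning is fun", "machine learning is powerful"]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(docs)            # sparse TF-IDF matrix
print(vectorizer.get_feature_names_out())     # vocabulary learned from the corpus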
# Examples: classification (supervised), clustering (unsupervised)
# Example: KMeans clustering
from sklearn.cluster import KMeans
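# A minimal KMeans sketch on random data (X is a hypothetical feature matrix)
import numpy as np
X = np.random.rand(100, 2)
kmeans = KMeans(n_clusters=3, n_init=10).fit(X)
print(kmeans.labels_[:10])    # cluster assignments for the first 10 samples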
# PCA example
from sklearn.decomposition import PCA
# Example: creating new features from date columns
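# A minimal sketch of deriving features from a date column with pandas (the DataFrame and its 'date' column are hypothetical)
import pandas as pd
df = pd.DataFrame({"date": pd.to_datetime(["2021-01-15", "2021-06-30"])})
df["month"] = df["date"].dt.month            # month number as a new feature
df["dayofweek"] = df["date"].dt.dayofweek    # day of week (0 = Monday)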
# Example: dropout in Keras
from tensorflow.keras.layers import Dropout
# L2 regularization in Keras
from tensorflow.keras.regularizers import l2
# BatchNormalization layer in Keras
from tensorflow.keras.layers import BatchNormalization
# Dropout layer example
from tensorflow.keras.layers import Dropout
# KFold cross-validation
from sklearn.model_selection import KFold
# Example with GridSearchCV
from sklearn.model_selection import GridSearchCV
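# A minimal GridSearchCV sketch with an SVM (the parameter grid values are illustrative)
from sklearn.svm import SVC
param_grid = {"C": [0.1, 1, 10], "kernel": ["linear", "rbf"]}
grid = GridSearchCV(SVC(), param_grid, cv=5)
# grid.fit(X_train, y_train)    # X_train / y_train assumed to exist
# print(grid.best_params_)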
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dropout(0.2),
    Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))
for epoch in range(num_epochs):              # Loop over the dataset multiple times (epochs)
    model.train()                            # Set the model to training mode
    for inputs, labels in train_loader:      # Iterate over batches from the training set
        optimizer.zero_grad()                # Clear gradients from the previous step
        outputs = model(inputs)              # Perform forward pass to get predictions
        loss = criterion(outputs, labels)    # Compute the loss between predictions and true labels
        loss.backward()                      # Backpropagate the loss to compute gradients
        optimizer.step()                     # Update model weights using the optimizer

    model.eval()                             # Switch to evaluation mode (e.g., disables dropout)
    with torch.no_grad():                    # Disable gradient computation for evaluation
        # Evaluate on the validation set:
        # typically loop through val_loader, run model(inputs), compute accuracy/loss
        pass                                 # Placeholder where validation code would go
from tensorflow.keras import layers, models # Import necessary Keras components
input_img = layers.Input(shape=(784,)) # Input layer for flattened 28x28 image (MNIST)
encoded = layers.Dense(64, activation='relu')(input_img) # Encoding layer with 64 units and ReLU activation
decoded = layers.Dense(784, activation='sigmoid')(encoded) # Decoding layer with sigmoid activation to output 784 values
autoencoder = models.Model(input_img, decoded) # Define the autoencoder model from input to reconstructed output
autoencoder.compile(optimizer='adam', loss='binary_crossentropy') # Compile model with Adam optimizer and binary crossentropy loss
from tensorflow.keras import layers, models # Import the layers and models modules from Keras
model = models.Sequential([  # Define a sequential model
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),  # 32 filters, 3x3 kernel, ReLU activation, grayscale input
    layers.MaxPooling2D((2, 2)),            # Downsample with a 2x2 max pooling layer
    layers.Flatten(),                       # Flatten the 2D feature maps to 1D
    layers.Dense(64, activation='relu'),    # Fully connected layer with 64 neurons and ReLU
    layers.Dense(10, activation='softmax')  # Output layer for 10 classes with softmax activation
])
model.compile(                               # Compile the model with appropriate configurations
    optimizer='adam',                        # Use Adam optimizer
    loss='sparse_categorical_crossentropy',  # Suitable for integer-labeled classification
    metrics=['accuracy']                     # Track accuracy during training
)
# Freeze all layers in the model
for param in model.parameters():
    param.requires_grad = False  # Disables gradient computation for each parameter
# Unfreeze only the final fully connected layer
for param in model.fc.parameters():
    param.requires_grad = True   # Enables training only for the FC layer
import torch
import torch.nn as nn
# Create an Embedding layer with 1000 possible tokens and embedding size of 50
embedding = nn.Embedding(num_embeddings=1000, embedding_dim=50)
# Example input tensor containing token indices
input_ids = torch.LongTensor([1, 2, 3])
# Pass input indices through the embedding layer to get dense vectors
embedded = embedding(input_ids)
# Print the shape of the output embedding tensor
print(embedded.shape) # Output shape: (3, 50)
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
# Build a simple Sequential model with Dropout
model = tf.keras.Sequential([
    Dense(64, activation='relu'),    # Fully connected layer with 64 units and ReLU activation
    Dropout(0.5),                    # Dropout layer that randomly drops 50% of the neurons during training
    Dense(10, activation='softmax')  # Output layer with 10 units for classification
])
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
# Define a Sequential model
model = tf.keras.Sequential([
    Dense(64, activation='relu'),    # Dense layer with 64 units and ReLU activation
    Dropout(0.5),                    # Dropout layer that drops 50% of inputs during training
    Dense(10, activation='softmax')  # Output layer with 10 units (for classification)
])
import torch.nn.utils
# Assuming 'model' is your neural network model
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) # Clips gradients to have max norm 1.0
import torch.nn as nn  # Import PyTorch neural network module

class SimpleCNN(nn.Module):  # Define a subclass of nn.Module
    def __init__(self):
        super().__init__()                  # Initialize the base class
        self.conv = nn.Conv2d(3, 16, 3, 1)  # 2D conv layer: 3 input channels, 16 output channels, 3x3 kernel, stride 1
        self.bn = nn.BatchNorm2d(16)        # Batch normalization on 16 channels
        self.relu = nn.ReLU()               # ReLU activation function

    def forward(self, x):   # Forward pass method
        x = self.conv(x)    # Apply convolution
        x = self.bn(x)      # Apply batch normalization
        x = self.relu(x)    # Apply ReLU activation
        return x            # Return the processed output
import torch                     # Import PyTorch
import torch.nn.functional as F  # Import functional API for activation functions

def scaled_dot_product_attention(query, key, value):
    d_k = query.size(-1)  # Size of the last dimension of query
    # Raw attention scores: query times key transpose, scaled by sqrt(d_k)
    scores = torch.matmul(query, key.transpose(-2, -1)) / torch.sqrt(torch.tensor(d_k, dtype=torch.float32))
    weights = F.softmax(scores, dim=-1)    # Normalize scores to probabilities with softmax
    output = torch.matmul(weights, value)  # Multiply weights by values to get weighted output
    return output, weights                 # Return the output and attention weights
# Save the model's state dictionary to a file
torch.save(model.state_dict(), 'model.pth')
# Load the saved state dictionary into the model
model.load_state_dict(torch.load('model.pth'))
# Set the model to evaluation mode (disables dropout, batchnorm updates)
model.eval()
import torch.nn as nn
# Define the loss criterion for classification
criterion = nn.CrossEntropyLoss()
# Get model output for inputs (logits, not softmax probabilities)
output = model(inputs)
# Calculate the loss comparing output and target labels
loss = criterion(output, targets)
from torchvision import transforms
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),  # Randomly flip image horizontally
    transforms.RandomRotation(15),      # Randomly rotate image by ±15 degrees
    transforms.ToTensor()               # Convert image to tensor
])
import torch.optim as optim
optimizer = optim.Adam(model.parameters(), lr=0.01)  # Initialize Adam optimizer with learning rate 0.01
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)  # Reduce LR by factor 0.1 every 10 epochs
for epoch in range(30):
    train(...)        # Your training function for one epoch
    scheduler.step()  # Decays learning rate after each epoch
import torch.optim as optim
optimizer = optim.Adam(model.parameters(), lr=0.01)  # Initialize Adam optimizer with initial LR 0.01
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)  # LR decays by 0.1 every 10 epochs
for epoch in range(30):
    train(...)        # Call your training loop function here
    scheduler.step()  # Update learning rate based on the schedule after each epoch
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=50, callbacks=[early_stop])
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import os
from torchvision import transforms

class MyDataset(Dataset):
    def __init__(self, folder):
        self.files = os.listdir(folder)  # List all files in the folder
        self.folder = folder

    def __len__(self):
        return len(self.files)  # Return number of images

    def __getitem__(self, idx):
        img_path = os.path.join(self.folder, self.files[idx])  # Get path of image
        image = Image.open(img_path).convert("RGB")            # Open and convert image to RGB
        return transforms.ToTensor()(image)                    # Convert image to tensor

dataset = MyDataset("images")                                  # Create dataset instance with folder path
loader = DataLoader(dataset, batch_size=32, shuffle=True)      # Create DataLoader for batching and shuffling
import tensorflow as tf

def parse_fn(example):
    image = tf.image.decode_jpeg(example)       # Decode JPEG-encoded image bytes
    image = tf.image.resize(image, [224, 224])  # Resize image to 224x224
    return image

dataset = tf.data.Dataset.list_files("images/*.jpg")           # List all jpg files in the 'images' folder
dataset = dataset.map(lambda x: parse_fn(tf.io.read_file(x)))  # Read each file and parse the image
dataset = dataset.batch(32)                                    # Batch images in groups of 32
dataset = dataset.prefetch(tf.data.AUTOTUNE)                   # Prefetch batches for an efficient pipeline
import tensorflow as tf
# Assume you have a trained or loaded Keras model
model = tf.keras.Sequential([...]) # Replace [...] with your model layers
model.compile(...) # Compile the model with optimizer, loss, metrics
# Create a TFLite converter from the Keras model
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert() # Convert the model to TFLite format
# Save the converted TFLite model to a file
with open("model.tflite", "wb") as f:
    f.write(tflite_model)
import tensorflow as tf
# Assume you have a trained or loaded Keras model
model = tf.keras.Sequential([...]) # Replace [...] with your model layers
model.compile(...) # Compile the model with optimizer, loss, and metrics
# Convert the Keras model to TensorFlow Lite format
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert() # Perform conversion
# Save the TensorFlow Lite model to a file
with open("model.tflite", "wb") as f:
    f.write(tflite_model)
import torch.onnx
# Load pretrained ResNet18 model from PyTorch Hub
model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True)
# Create a dummy input tensor matching the model's expected input shape
dummy_input = torch.randn(1, 3, 224, 224)
# Export the model to ONNX format file named "resnet18.onnx"
torch.onnx.export(model, dummy_input, "resnet18.onnx")
import torch.nn.utils.prune as prune
import torch.nn as nn
# Define a simple linear layer with 100 inputs and 10 outputs
model = nn.Linear(100, 10)
# Apply random unstructured pruning on the 'weight' parameter with 30% sparsity
prune.random_unstructured(model, name="weight", amount=0.3)
# Print the weights to observe pruning (some will be zero now)
print(model.weight)
import torch.quantization
# Load a pretrained ResNet18 model (float32)
model_fp32 = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)
model_fp32.eval()
# Apply dynamic quantization to Linear layers
model_int8 = torch.quantization.quantize_dynamic(
    model_fp32, {torch.nn.Linear}, dtype=torch.qint8
)
# Print the quantized model summary
print(model_int8)
from transformers import ViTFeatureExtractor, ViTForImageClassification
from PIL import Image
import requests
import torch
# Load pretrained ViT model and feature extractor
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
# Load image from URL
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/image_classification.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
# Preprocess image and convert to PyTorch tensor batch
inputs = extractor(images=image, return_tensors="pt")
# Forward pass through model
outputs = model(**inputs)
# Get predicted class ID with highest logit score
print(outputs.logits.argmax(-1))
# Clone the YOLOv5 repository
git clone https://github.com/ultralytics/yolov5
cd yolov5
# Install Python dependencies
pip install -r requirements.txt
import torch
# Load YOLOv5 small pre-trained model
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
# Image URL for inference
img = 'https://ultralytics.com/images/zidane.jpg'
# Perform inference
results = model(img)
# Show image with detected bounding boxes
results.show()
from tensorflow.keras.applications import ResNet50
# Load ResNet50 model with pretrained weights
model = ResNet50(weights='imagenet')
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, BatchNormalization
model = Sequential([
    Dense(64, input_shape=(100,), activation='relu'),  # Dense layer with 64 units and ReLU activation
    BatchNormalization(),                               # Normalize activations to improve training stability
    Dense(1, activation='sigmoid')                      # Output layer for binary classification
])
import torch
import torch.nn as nn

# Generator network
class Generator(nn.Module):
    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, 128),   # Fully connected layer from noise input to hidden layer
            nn.ReLU(),                   # Activation function
            nn.Linear(128, output_dim),  # Output layer to generate data (e.g., image pixels)
            nn.Tanh()                    # Output scaled between -1 and 1
        )

    def forward(self, x):
        return self.net(x)

# Discriminator network
class Discriminator(nn.Module):
    def __init__(self, input_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, 128),  # Fully connected layer from input data to hidden layer
            nn.LeakyReLU(0.2),          # LeakyReLU activation allows small gradients when inactive
            nn.Linear(128, 1),          # Output layer giving probability that input is real
            nn.Sigmoid()                # Sigmoid activation outputs a probability between 0 and 1
        )

    def forward(self, x):
        return self.net(x)

# Create instances
G = Generator(100, 784)  # Generator takes a 100-dim noise vector, outputs 784 dims (e.g. a 28x28 image)
D = Discriminator(784)   # Discriminator takes a 784-dim input and classifies real vs. fake
from transformers import BertTokenizer, BertForSequenceClassification
from torch.nn.functional import softmax
# Load pre-trained tokenizer and model
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # Tokenizer splits text into tokens
model = BertForSequenceClassification.from_pretrained('bert-base-uncased') # Pre-trained BERT model
# Tokenize input text and convert to tensor format
inputs = tokenizer("I love deep learning!", return_tensors="pt")
# Forward pass through the model
outputs = model(**inputs)
probs = softmax(outputs.logits, dim=1) # Apply softmax to get probabilities for each class
print(probs) # Prints class probabilities
import torch
import torch.nn as nn

class TransformerBlock(nn.Module):
    def __init__(self, embed_size, heads, ff_hidden, dropout):
        super().__init__()
        self.attention = nn.MultiheadAttention(embed_size, heads)  # Multi-head self-attention
        self.norm1 = nn.LayerNorm(embed_size)  # Normalization after attention
        self.norm2 = nn.LayerNorm(embed_size)  # Normalization after feed-forward
        self.feed_forward = nn.Sequential(
            nn.Linear(embed_size, ff_hidden),  # First linear layer in feed-forward
            nn.ReLU(),                         # Activation
            nn.Linear(ff_hidden, embed_size)   # Second linear layer back to embed size
        )
        self.dropout = nn.Dropout(dropout)     # Dropout for regularization

    def forward(self, x):
        attn_output, _ = self.attention(x, x, x)  # Self-attention on the input
        x = self.norm1(attn_output + x)           # Add & Norm
        ff_output = self.feed_forward(x)          # Feed-forward network
        x = self.norm2(ff_output + x)             # Add & Norm
        return x
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)),  # 32 filters, 3x3 kernel, 64x64 RGB input
    MaxPooling2D(pool_size=(2, 2)),         # Downsamples spatial dims by 2
    Conv2D(64, (3, 3), activation='relu'),  # 64 filters, 3x3 kernel
    MaxPooling2D(pool_size=(2, 2)),         # Another downsampling layer
    Flatten(),                              # Flattens 2D feature maps into a 1D vector
    Dense(128, activation='relu'),          # Fully connected layer with 128 units
    Dense(10, activation='softmax')         # Output layer for 10 classes with probabilities
])
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(100,))) # Input layer with 128 neurons and ReLU activation
model.add(Dropout(0.5)) # Dropout layer randomly disables 50% of neurons during training to prevent overfitting
model.add(Dense(1, activation='sigmoid')) # Output layer with 1 neuron and sigmoid activation for binary classification
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten
# Load VGG16 without the fully connected layers on top
base_model = VGG16(include_top=False, input_shape=(224, 224, 3))
# Freeze the convolutional base to prevent training
for layer in base_model.layers:
    layer.trainable = False
# Add new classifier layers on top
x = Flatten()(base_model.output) # Flatten feature maps to 1D vector
x = Dense(64, activation='relu')(x) # Fully connected layer with ReLU activation
output = Dense(1, activation='sigmoid')(x) # Single output neuron with sigmoid for binary classification
# Create the new model
model = Model(inputs=base_model.input, outputs=output)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import get_custom_objects

# Define the Swish activation function
def swish(x):
    return x * K.sigmoid(x)  # Swish = x * sigmoid(x)

# Register Swish as a custom activation for use in models
get_custom_objects().update({'swish': Activation(swish)})

# Example: Use Swish activation in a simple model
model = Sequential()
model.add(Dense(32, input_shape=(10,), activation='swish'))  # Dense layer with Swish activation
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Create a sequential model
model = Sequential()
model.add(Dense(32, input_shape=(10,), activation='relu')) # Input layer with 10 features
model.add(Dense(16, activation='relu')) # Hidden layer with 16 neurons
model.add(Dense(1, activation='sigmoid')) # Output layer for binary classification
# Compile the model with Adam optimizer and binary crossentropy loss
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Generate dummy input data: 100 samples, 10 features each
import numpy as np
X = np.random.rand(100, 10)
# Generate dummy binary labels for classification
y = np.random.randint(2, size=(100, 1))
# Train the model for 5 epochs
model.fit(X, y, epochs=5)