# Steps to run a Python project

# 1. Make sure Python is installed. Check with:
python --version

# 2. Create a Python file, e.g., app.py
# app.py
print("Hello, Python Project!")

# 3. Open your terminal or command prompt
# Navigate to the folder where your script is saved

# 4. Run the script using the command:
python app.py
# Output:
# Hello, Python Project!
# Variables store data in memory for later use.
# Python is dynamically typed, so no type declaration is needed.
name = "Alice"     # String variable
age = 25           # Integer variable
is_student = True  # Boolean variable

print(name)        # Output: Alice
print(age)         # Output: 25
print(is_student)  # Output: True
# A function is a reusable block of code that performs a task.
# Use the 'def' keyword to define a function.
def greet(name):                   # Define function with one parameter
    print("Hello, " + name + "!")  # Function body

greet("Bob")  # Output: Hello, Bob!
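# Functions can also return values and take default arguments;
# a quick illustration:
def add(a, b=10):  # 'b' defaults to 10 when omitted
    return a + b   # Send a result back to the caller

print(add(5))      # Output: 15
print(add(5, 20))  # Output: 25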
# A list is an ordered collection of items.
# Lists are mutable and can contain mixed data types.
fruits = ["apple", "banana", "cherry"]  # Define a list
print(fruits[0])  # Output: apple

fruits.append("orange")  # Add item to list
print(fruits)  # Output: ['apple', 'banana', 'cherry', 'orange']
# A for loop is used to iterate over sequences (like lists, strings, etc.)
numbers = [1, 2, 3]
for num in numbers:  # Loop over list
    print(num)       # Output: 1 2 3 (one per line)
# A dictionary holds key-value pairs.
# It is mutable and indexed by keys (and preserves insertion order
# since Python 3.7).
student = {
    "name": "John",
    "age": 20,
    "grade": "A"
}
print(student["name"])  # Output: John

student["age"] = 21     # Update value
print(student["age"])   # Output: 21
# Use try-except blocks to catch and handle exceptions.
try:
    result = 10 / 0  # Division by zero error
except ZeroDivisionError:
    print("Cannot divide by zero!")  # Output: Cannot divide by zero!
# Use the input() function to get input from the user.
name = input("Enter your name: ")  # Prompt user
print("Hello, " + name + "!")      # Output: Hello, <user_input>!
# Use the open() function with write ('w') mode to write to files.
with open("sample.txt", "w") as file:  # Open file in write mode
    file.write("This is a test.")      # Write to file
# The 'with' block automatically closes the file
# Classes define blueprints for objects.
# Objects are instances of classes.
class Person:
    def __init__(self, name):  # Constructor method
        self.name = name       # Instance variable

    def greet(self):           # Method
        print("Hi, I am " + self.name)

p = Person("Alice")  # Create object
p.greet()            # Output: Hi, I am Alice
# List comprehensions provide a concise way to create lists.
numbers = [1, 2, 3, 4, 5]
squares = [x**2 for x in numbers]  # Square each number
print(squares)  # Output: [1, 4, 9, 16, 25]
# A lambda function is a small anonymous function.
# It can have any number of arguments but only one expression.
square = lambda x: x * x  # Define lambda function
print(square(5))          # Output: 25
# == compares values. 'is' compares identities (memory addresses).
a = [1, 2, 3]
b = [1, 2, 3]
c = a

print(a == b)  # True: values are equal
print(a is b)  # False: different objects
print(a is c)  # True: same object
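# In practice, 'is' is mostly used for singletons such as None;
# for example:
value = None
if value is None:          # Preferred over 'value == None'
    print("No value set")  # Output: No value set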
# Slicing extracts a portion of a list, string, or tuple.
my_list = [0, 1, 2, 3, 4, 5]
print(my_list[1:4])  # Output: [1, 2, 3]
print(my_list[:3])   # Output: [0, 1, 2]
print(my_list[::2])  # Output: [0, 2, 4]
# Lists are mutable; tuples are immutable.
my_list = [1, 2, 3]   # Can change values
my_tuple = (1, 2, 3)  # Cannot change values

my_list[0] = 10     # Works
# my_tuple[0] = 10  # Error
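# Attempting the commented-out assignment raises a TypeError;
# a quick illustration:
point = (1, 2, 3)
try:
    point[0] = 10
except TypeError as e:
    print(e)  # Output: 'tuple' object does not support item assignment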
# *args allows a variable number of positional arguments
# **kwargs allows a variable number of keyword arguments
def show_args(*args, **kwargs):
    print(args)    # Tuple of positional arguments
    print(kwargs)  # Dictionary of keyword arguments

show_args(1, 2, 3, a=10, b=20)
# Output: (1, 2, 3)
#         {'a': 10, 'b': 20}
# You can reverse a list in multiple ways
my_list = [1, 2, 3, 4]

reversed1 = my_list[::-1]            # Slicing (new list)
reversed2 = list(reversed(my_list))  # Using reversed() (new list)
my_list.reverse()                    # In-place reverse (modifies my_list)

print(reversed1)  # Output: [4, 3, 2, 1]
print(reversed2)  # Output: [4, 3, 2, 1]
print(my_list)    # Output: [4, 3, 2, 1]
# Use 'import' to include modules
import math           # Import entire module
print(math.sqrt(16))  # Output: 4.0

from math import pi   # Import specific object
print(pi)             # Output: 3.14159...
# Sets are unordered collections of unique items
my_set = {1, 2, 2, 3, 4}
print(my_set)  # Output: {1, 2, 3, 4}

my_set.add(5)     # Add item
my_set.remove(1)  # Remove item
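# Sets also support math-style operations; for example:
a = {1, 2, 3}
b = {2, 3, 4}
print(a | b)  # Union: {1, 2, 3, 4}
print(a & b)  # Intersection: {2, 3}
print(a - b)  # Difference: {1}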
# Generators yield values one at a time using 'yield'
def count_up_to(limit):  # 'limit' avoids shadowing the built-in max()
    count = 1
    while count <= limit:
        yield count
        count += 1

for num in count_up_to(3):
    print(num)  # Output: 1 2 3
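# A generator expression gives the same lazy behavior inline;
# for example:
squares = (x * x for x in range(3))  # Parentheses, not brackets
print(next(squares))  # Output: 0
print(next(squares))  # Output: 1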
# Decorators modify the behavior of functions.
def decorator_func(func):
    def wrapper():
        print("Before function call")
        func()
        print("After function call")
    return wrapper

@decorator_func
def say_hello():
    print("Hello!")

say_hello()
# Output:
# Before function call
# Hello!
# After function call
# Use try-except blocks to handle exceptions gracefully
try:
    x = 10 / 0  # Will raise ZeroDivisionError
except ZeroDivisionError:
    print("Cannot divide by zero!")  # Handle error
finally:
    print("Execution finished.")     # Always runs
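# try also supports an 'else' block that runs only when no exception
# was raised; for example:
try:
    x = 10 / 2
except ZeroDivisionError:
    print("Cannot divide by zero!")
else:
    print("Result:", x)           # Output: Result: 5.0
finally:
    print("Execution finished.")  # Always runs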
# 'with' ensures proper resource management (e.g., closing files)
with open("example.txt", "w") as file:
    file.write("Hello, world!")
# File is automatically closed
# Use the type() function
x = 5
print(type(x))  # Output: <class 'int'>

y = "hello"
print(type(y))  # Output: <class 'str'>
# A dictionary is a collection of key-value pairs
person = {
    "name": "Alice",
    "age": 25,
    "city": "New York"
}
print(person["name"])  # Output: Alice
# List comprehension provides a concise way to create lists
squares = [x*x for x in range(5)]
print(squares)  # Output: [0, 1, 4, 9, 16]
# append() adds a single element
# extend() adds elements from another iterable
a = [1, 2]
a.append([3, 4])
print(a)  # Output: [1, 2, [3, 4]]

b = [1, 2]
b.extend([3, 4])
print(b)  # Output: [1, 2, 3, 4]
# Use '#' for single-line comments
# Use triple quotes ''' or """ for multiline docstrings

# This is a single-line comment

"""
This is a multiline
comment or docstring
"""
# 'pass' is a placeholder for future code
def function_to_implement_later():
    pass  # No operation, just a placeholder

class EmptyClass:
    pass
x = 5  # Global variable

def my_func():
    x = 10    # Local variable
    print(x)  # Output: 10

my_func()
print(x)  # Output: 5
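# To modify a global variable inside a function, declare it with
# the 'global' keyword; for example:
count = 0

def increment():
    global count  # Refer to the module-level variable
    count += 1

increment()
print(count)  # Output: 1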
# Use built-in functions: int(), float(), str(), list(), etc.
x = "123"
print(int(x))    # Output: 123

y = 3.14
print(str(y))    # Output: 3.14

z = [1, 2, 3]
print(tuple(z))  # Output: (1, 2, 3)
# 'is' checks for identity (whether two variables refer to the same object)
a = [1, 2]
b = a
c = [1, 2]

print(a is b)  # True (same object)
print(a is c)  # False (same value, different object)
# Slicing is used to get a part of a list, string, or tuple
numbers = [0, 1, 2, 3, 4, 5]
print(numbers[1:4])  # Output: [1, 2, 3]
print(numbers[:3])   # Output: [0, 1, 2]
print(numbers[::2])  # Output: [0, 2, 4]
# *args allows a variable number of positional arguments
# **kwargs allows a variable number of keyword arguments
def show_info(*args, **kwargs):
    print(args)
    print(kwargs)

show_info(1, 2, 3, name="Alice", age=30)
# Output: (1, 2, 3)
# Output: {'name': 'Alice', 'age': 30}
# Open a file to write
with open("sample.txt", "w") as f:
    f.write("Hello Python!")

# Open a file to read
with open("sample.txt", "r") as f:
    content = f.read()
print(content)  # Output: Hello Python!
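# For large files, it is more memory-friendly to iterate line by line;
# for example:
with open("sample.txt", "r") as f:
    for line in f:           # Reads one line at a time
        print(line.strip())  # strip() removes the trailing newline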
# Lambda functions are anonymous functions defined using the lambda keyword
add = lambda x, y: x + y
print(add(2, 3))  # Output: 5
from functools import reduce

nums = [1, 2, 3, 4, 5]

# map - apply function to all elements
squares = list(map(lambda x: x*x, nums))
print(squares)  # Output: [1, 4, 9, 16, 25]

# filter - filter based on condition
even = list(filter(lambda x: x%2 == 0, nums))
print(even)  # Output: [2, 4]

# reduce - apply function cumulatively
sum_all = reduce(lambda x, y: x + y, nums)
print(sum_all)  # Output: 15
# 'in' checks membership in iterables
fruits = ["apple", "banana", "cherry"]
print("apple" in fruits)      # Output: True
print("grape" not in fruits)  # Output: True
dict1 = {'a': 1, 'b': 2}
dict2 = {'b': 3, 'c': 4}

# Python 3.9+
merged = dict1 | dict2
print(merged)  # Output: {'a': 1, 'b': 3, 'c': 4}

# For older versions
merged = {**dict1, **dict2}
print(merged)  # Output: {'a': 1, 'b': 3, 'c': 4}
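# To merge into an existing dictionary in place, use update();
# for example:
dict1.update(dict2)  # Modifies dict1 directly
print(dict1)         # Output: {'a': 1, 'b': 3, 'c': 4}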
# Module: a file with Python code (.py)
# Package: a directory with __init__.py and modules

# example.py
def greet():
    print("Hello from module!")

# main.py
import example
example.greet()  # Output: Hello from module!
# A virtual environment isolates dependencies for each project

# Create virtual environment
python -m venv env

# Activate on Windows
env\Scripts\activate

# Activate on Unix/macOS
source env/bin/activate

# Install packages locally
pip install requests
import datetime

# Current date and time
now = datetime.datetime.now()
print("Now:", now)

# Create a specific date
d = datetime.date(2025, 5, 19)
print("Date:", d)

# Format date
print("Formatted:", now.strftime("%Y-%m-%d"))
# Generators use yield to produce items one by one
def countdown(n):
    while n > 0:
        yield n
        n -= 1

for i in countdown(3):
    print(i)  # Output: 3 2 1
import sys

# Prints the list of command-line arguments
print("Arguments:", sys.argv)

# Example:
# python script.py hello world
# Output: ['script.py', 'hello', 'world']
# Defining a class
class Dog:
    def __init__(self, name):
        self.name = name

    def bark(self):
        print(self.name + " says woof!")

# Creating an object
d = Dog("Buddy")
d.bark()  # Output: Buddy says woof!
# Inheritance allows a class to use methods and properties of another
class Animal:
    def speak(self):
        print("Animal speaks")

class Dog(Animal):
    def bark(self):
        print("Dog barks")

d = Dog()
d.speak()  # Output: Animal speaks
d.bark()   # Output: Dog barks
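# A subclass can also override a parent method and still call the
# parent version with super(); for example:
class Cat(Animal):
    def speak(self):
        super().speak()  # Call the parent implementation
        print("Cat meows")

c = Cat()
c.speak()
# Output:
# Animal speaks
# Cat meows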
# A decorator adds functionality to an existing function
def my_decorator(func):
    def wrapper():
        print("Before function")
        func()
        print("After function")
    return wrapper

@my_decorator
def greet():
    print("Hello!")

greet()
# Output:
# Before function
# Hello!
# After function
# List comprehension example
squares = [x*x for x in range(5)]
print(squares)  # Output: [0, 1, 4, 9, 16]

# Dictionary comprehension
d = {x: x*x for x in range(3)}
print(d)  # Output: {0: 0, 1: 1, 2: 4}
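# Comprehensions can also include a condition to filter items;
# for example:
evens = [x for x in range(10) if x % 2 == 0]
print(evens)  # Output: [0, 2, 4, 6, 8]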
numbers = [4, 2, 9, 1]

# Sort ascending
numbers.sort()
print(numbers)  # Output: [1, 2, 4, 9]

# Sort descending
numbers.sort(reverse=True)
print(numbers)  # Output: [9, 4, 2, 1]
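# sorted() returns a new list instead of sorting in place, and the
# 'key' argument customizes the order; for example:
words = ["banana", "fig", "apple"]
print(sorted(words, key=len))  # Output: ['fig', 'apple', 'banana']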
names = ["Alice", "Bob"] scores = [85, 90] # Combine elements from both lists combined = list(zip(names, scores)) print(combined) # Output: [('Alice', 85), ('Bob', 90)]
# Reverse a string
s = "hello"
print(s[::-1])  # Output: olleh

# Reverse a list
nums = [1, 2, 3]
nums.reverse()
print(nums)  # Output: [3, 2, 1]
# Context managers manage resources like files
with open("file.txt", "w") as f:
    f.write("Hello, world!")
# File auto-closes after this block
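# You can write your own context manager with contextlib; a minimal
# sketch:
from contextlib import contextmanager

@contextmanager
def managed_resource():
    print("Acquire resource")      # Runs on entering the 'with' block
    try:
        yield "resource"
    finally:
        print("Release resource")  # Runs even if the block raises

with managed_resource() as r:
    print("Using", r)
# Output:
# Acquire resource
# Using resource
# Release resource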
# Lambda creates anonymous functions
add = lambda a, b: a + b
print(add(2, 3))  # Output: 5
import sys

x = [1, 2, 3, 4]
print(sys.getsizeof(x))  # Output: memory size in bytes
# Create virtual environment
python -m venv myenv

# Activate (Windows)
myenv\Scripts\activate

# Activate (Mac/Linux)
source myenv/bin/activate
# *args: variable number of positional arguments
# **kwargs: variable number of keyword arguments
def demo(*args, **kwargs):
    print("args:", args)
    print("kwargs:", kwargs)

demo(1, 2, a=3, b=4)
# Output: args: (1, 2)
#         kwargs: {'a': 3, 'b': 4}
a = {'x': 1}
b = {'y': 2}

# Merge using unpacking
merged = {**a, **b}
print(merged)  # Output: {'x': 1, 'y': 2}
# Magic methods start and end with __
class Book:
    def __init__(self, title):
        self.title = title

    def __str__(self):
        return f"Book: {self.title}"

b = Book("Python")
print(b)  # Output: Book: Python
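# Other magic methods hook into built-in behavior the same way;
# for example, __len__ makes an object work with len():
class Shelf:
    def __init__(self, books):
        self.books = books

    def __len__(self):
        return len(self.books)

s = Shelf(["Python", "AI"])
print(len(s))  # Output: 2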
my_dict = {"a": 1} # Use get() with default value print(my_dict.get("b", "Not found")) # Output: Not found
# == checks value equality
# is checks identity (same memory location)
a = [1, 2]
b = [1, 2]
print(a == b)  # True
print(a is b)  # False
# map() applies a function to every item in a list
nums = [1, 2, 3]
squared = list(map(lambda x: x*x, nums))
print(squared)  # Output: [1, 4, 9]
# Example using a scikit-learn model
from sklearn.linear_model import LinearRegression

model = LinearRegression()
X = [[0], [1], [2]]
y = [0, 1, 2]

model.fit(X, y)              # Train the model
print(model.predict([[3]]))  # Predicts: [3.]
# Uses the legacy openai-python (<1.0) API
import openai

openai.api_key = "your-api-key"

response = openai.ChatCompletion.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message['content'])
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("AI will change the world because", max_length=30))
from flask import Flask, request, jsonify
from transformers import pipeline

app = Flask(__name__)
classifier = pipeline("sentiment-analysis")

@app.route('/analyze', methods=['POST'])
def analyze():
    text = request.json['text']
    result = classifier(text)
    return jsonify(result)

# Run with: flask run
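# A quick way to test the endpoint once the server is running
# (assuming Flask's default address http://127.0.0.1:5000):
import requests

resp = requests.post(
    "http://127.0.0.1:5000/analyze",
    json={"text": "I love this!"}
)
print(resp.json())  # e.g. [{'label': 'POSITIVE', 'score': ...}]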
import tensorflow as tf
from tensorflow.keras import layers

model = tf.keras.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10)
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# model.fit(x_train, y_train, epochs=5)
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline

app = FastAPI()
summarizer = pipeline("summarization")

class RequestBody(BaseModel):
    text: str

@app.post("/summarize")
def summarize(req: RequestBody):
    result = summarizer(req.text, max_length=50)
    return result

# Run with: uvicorn <filename>:app --reload
import speech_recognition as sr

r = sr.Recognizer()
with sr.Microphone() as source:
    print("Say something:")
    audio = r.listen(source)

try:
    print("You said:", r.recognize_google(audio))
except sr.UnknownValueError:
    print("Sorry, could not recognize.")
except sr.RequestError:
    print("Speech service unavailable.")
import cv2

# Load image and convert to grayscale
img = cv2.imread("image.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Display image
cv2.imshow("Gray Image", gray)
cv2.waitKey(0)
cv2.destroyAllWindows()
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe.to("cuda")  # Use GPU if available

image = pipe("A fantasy landscape with dragons").images[0]
image.save("generated.png")
import cv2
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # Pre-trained model
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:  # Stop if no frame was captured
        break
    results = model(frame)  # Inference
    annotated_frame = results[0].plot()
    cv2.imshow("YOLO Detection", annotated_frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
# In views.py of a Django app
from django.http import JsonResponse
from transformers import pipeline

summarizer = pipeline("summarization")

def summarize_view(request):
    text = request.GET.get('text', '')
    summary = summarizer(text, max_length=50)
    return JsonResponse({'summary': summary})
# streamlit_app.py
import streamlit as st
from transformers import pipeline

classifier = pipeline("sentiment-analysis")

st.title("AI Sentiment Analyzer")
text = st.text_input("Enter your text:")

if text:
    result = classifier(text)
    st.write(result)

# Run with: streamlit run streamlit_app.py
# Uses the python-telegram-bot v13 API
from telegram.ext import Updater, CommandHandler
from transformers import pipeline

summarizer = pipeline("summarization")

def start(update, context):
    update.message.reply_text("Send /summarize & your text")

def summarize(update, context):
    text = " ".join(context.args)
    summary = summarizer(text, max_length=50)
    update.message.reply_text(summary[0]['summary_text'])

updater = Updater("YOUR_BOT_TOKEN", use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("summarize", summarize))
updater.start_polling()
# Flask API (Python backend)
from flask import Flask, request, jsonify
from transformers import pipeline

app = Flask(__name__)
classifier = pipeline("sentiment-analysis")

@app.route('/analyze', methods=['POST'])
def analyze():
    text = request.json['text']
    result = classifier(text)
    return jsonify(result)

# Use this API in a mobile app via HTTP requests
import face_recognition

image = face_recognition.load_image_file("person.jpg")
face_locations = face_recognition.face_locations(image)
print(f"Found {len(face_locations)} face(s).")
import discord
from transformers import pipeline

# discord.py 2.x requires explicit intents to read message content
intents = discord.Intents.default()
intents.message_content = True

client = discord.Client(intents=intents)
summarizer = pipeline("summarization")

@client.event
async def on_message(message):
    if message.content.startswith('!summarize'):
        text = message.content[len('!summarize '):]
        summary = summarizer(text, max_length=50)
        await message.channel.send(summary[0]['summary_text'])

client.run("YOUR_DISCORD_BOT_TOKEN")
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer

bot = ChatBot("AI Bot")
trainer = ChatterBotCorpusTrainer(bot)
trainer.train("chatterbot.corpus.english")

response = bot.get_response("How are you?")
print(response)
# Sample AI + IoT (Raspberry Pi with temperature sensor)
import Adafruit_DHT
from sklearn.linear_model import LinearRegression

# Sensor setup
sensor = Adafruit_DHT.DHT11
pin = 4
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)

# Predict temperature trend with dummy data
model = LinearRegression()
model.fit([[0], [1], [2]], [22, 23, 24])
prediction = model.predict([[3]])
print("Predicted temperature:", prediction)
import sqlite3
from transformers import pipeline

classifier = pipeline("sentiment-analysis")

# Connect to DB
conn = sqlite3.connect("feedback.db")
cursor = conn.cursor()
cursor.execute("SELECT comment FROM reviews")

for row in cursor.fetchall():
    sentiment = classifier(row[0])
    print(f"{row[0]} -> {sentiment}")

conn.close()
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from transformers import pipeline

# Setup
scope = ["https://spreadsheets.google.com/feeds",
         "https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name("creds.json", scope)
client = gspread.authorize(creds)

sheet = client.open("Feedback").sheet1
data = sheet.col_values(1)

classifier = pipeline("sentiment-analysis")
for comment in data[1:]:
    sentiment = classifier(comment)
    print(comment, "->", sentiment)
# Using scikit-learn to classify emails as spam or not
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

emails = [
    "Win a free lottery now",
    "Meeting at 10am tomorrow",
    "Claim your free prize",
    "Project update attached"
]
labels = [1, 0, 1, 0]  # 1: spam, 0: not spam

vectorizer = CountVectorizer()
features = vectorizer.fit_transform(emails)

model = MultinomialNB()
model.fit(features, labels)

test_email = ["Free lottery winner"]
test_features = vectorizer.transform(test_email)
prediction = model.predict(test_features)
print("Spam" if prediction[0] == 1 else "Not Spam")
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Sample user ratings matrix (users x items)
ratings = np.array([
    [5, 4, 0, 0],
    [4, 0, 0, 2],
    [1, 1, 0, 5],
    [0, 0, 5, 4],
])

# Simple item-based similarity (cosine)
item_similarity = cosine_similarity(ratings.T)

# Recommend items for user 0
user_ratings = ratings[0]
scores = item_similarity.dot(user_ratings)
print("Recommendation scores:", scores)
import tweepy
from transformers import pipeline

# Authenticate to the Twitter API
auth = tweepy.OAuth1UserHandler("API_KEY", "API_SECRET",
                                "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
api = tweepy.API(auth)

classifier = pipeline("sentiment-analysis")

tweets = api.search_tweets(q="python", count=5)
for tweet in tweets:
    result = classifier(tweet.text)
    print(tweet.text)
    print("Sentiment:", result)
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

image = Image.open("sample.jpg")
inputs = processor(image, return_tensors="pt")
out = model.generate(**inputs)

caption = processor.decode(out[0], skip_special_tokens=True)
print("Caption:", caption)
from transformers import pipeline

summarizer = pipeline("summarization")

text = """
Artificial intelligence is the simulation of human intelligence processes
by machines, especially computer systems. Applications include expert
systems, natural language processing, speech recognition and machine vision.
"""

summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
print("Summary:", summary[0]['summary_text'])
import pytesseract
from PIL import Image

image = Image.open("document.png")
text = pytesseract.image_to_string(image)

print("Extracted Text:")
print(text)
import speech_recognition as sr
import pyttsx3

recognizer = sr.Recognizer()
engine = pyttsx3.init()

with sr.Microphone() as source:
    print("Say something...")
    audio = recognizer.listen(source)

try:
    text = recognizer.recognize_google(audio)
    print("You said:", text)
    engine.say("You said " + text)
    engine.runAndWait()
except Exception as e:
    print("Error:", e)
# Using the RPA library and AI sentiment analysis together
import rpa as r
from transformers import pipeline

r.init()
r.url('https://www.example.com/contact')

sentiment = pipeline("sentiment-analysis")
text = r.read('//*[@id="contact-form"]/textarea')
result = sentiment(text)
print("Sentiment:", result)

r.close()
import numpy as np
from sklearn.linear_model import LinearRegression

# Sample stock prices
prices = np.array([100, 102, 101, 105, 110])
days = np.array([1, 2, 3, 4, 5]).reshape(-1, 1)

model = LinearRegression()
model.fit(days, prices)

predict_day = np.array([[6]])
predicted_price = model.predict(predict_day)
print("Predicted price on day 6:", predicted_price[0])
# Anomaly detection using IsolationForest
from sklearn.ensemble import IsolationForest
import numpy as np

# Sample data with an anomaly
data = np.array([[10], [12], [10], [13], [100]])

model = IsolationForest(contamination=0.1)
model.fit(data)

pred = model.predict(data)  # -1 means anomaly, 1 means normal
print("Predictions:", pred)
# The 100 is flagged as an anomaly (-1)
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')

input_text = "Once upon a time"
inputs = tokenizer.encode(input_text, return_tensors='pt')

outputs = model.generate(inputs, max_length=50, num_return_sequences=1)
generated = tokenizer.decode(outputs[0], skip_special_tokens=True)

print("Generated text:")
print(generated)
from transformers import pipeline, Conversation

chatbot = pipeline("conversational")

conv = Conversation("Hello, how are you?")
response = chatbot(conv)
print("Chatbot response:", response.generated_responses[-1])
# Example assumes usage of the diffusers library
from diffusers import StableDiffusionPipeline
import torch

model_id = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "A fantasy landscape with castles"
image = pipe(prompt).images[0]
image.save("generated_image.png")
print("Image saved as generated_image.png")
import speech_recognition as sr

r = sr.Recognizer()
with sr.AudioFile('audio.wav') as source:
    audio = r.record(source)

text = r.recognize_google(audio)
print("Transcribed text:")
print(text)
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")

input_code = "def add_numbers(a, b):"
inputs = tokenizer(input_code, return_tensors="pt")

outputs = model.generate(**inputs, max_length=50)
generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)

print("Generated code:")
print(generated_code)
import face_recognition

image = face_recognition.load_image_file("test.jpg")
face_locations = face_recognition.face_locations(image)

print(f"Found {len(face_locations)} face(s) in the image.")
for i, location in enumerate(face_locations):
    print(f"Face {i+1} location: {location}")
import cv2
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:  # Stop if no frame was captured
        break
    results = model(frame)
    annotated = results.render()[0]  # render() returns annotated frames
    cv2.imshow('YOLOv5 Detection', annotated)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
from transformers import pipeline

translator = pipeline("translation_en_to_fr")

text = "Hello, how are you?"
translation = translator(text, max_length=40)
print("Translation:", translation[0]['translation_text'])
from transformers import pipeline

# Initialize the sentiment-analysis pipeline
sentiment_analyzer = pipeline("sentiment-analysis")

text = "I love using AI tools, they make life easier!"
result = sentiment_analyzer(text)

print("Sentiment analysis result:")
print(result)  # Output: [{'label': 'POSITIVE', 'score': 0.9998}]
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

# Sample time series data (days and sales)
days = np.arange(1, 11).reshape(-1, 1)
sales = np.array([200, 220, 250, 270, 300, 320, 350, 370, 400, 420])

# Train/test split
X_train, X_test, y_train, y_test = train_test_split(days, sales,
                                                    test_size=0.2,
                                                    random_state=42)

# Fit linear regression model (simple forecasting)
model = LinearRegression()
model.fit(X_train, y_train)

# Predict future sales for the next 3 days
future_days = np.array([11, 12, 13]).reshape(-1, 1)
predictions = model.predict(future_days)

print("Predicted sales for next days:")
for day, pred in zip(future_days.flatten(), predictions):
    print(f"Day {day}: {pred:.2f}")

# Plot
plt.scatter(days, sales, color='blue', label='Actual sales')
plt.plot(np.arange(1, 14), model.predict(np.arange(1, 14).reshape(-1, 1)),
         color='red', label='Forecast')
plt.xlabel('Day')
plt.ylabel('Sales')
plt.legend()
plt.show()