AI-Based Recipe Generator

import requests

import pandas as pd

from openai import OpenAI
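# Requires: pip install requests pandas openai  (openai>=1.0 for the client API used below)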


# Replace with your API keys

SPOONACULAR_API_KEY = "your_spoonacular_api_key"

OPENAI_API_KEY = "your_openai_api_key"
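
# Optional (an added sketch): pull the keys from environment variables so they
# stay out of source control; falls back to the placeholders above if unset.
import os
SPOONACULAR_API_KEY = os.getenv("SPOONACULAR_API_KEY", SPOONACULAR_API_KEY)
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", OPENAI_API_KEY)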


client = OpenAI(api_key=OPENAI_API_KEY)


# -----------------------------------

# 1. Get recipes from Spoonacular API

# -----------------------------------

def get_recipes_from_spoonacular(ingredients, number=5):

    url = f"https://api.spoonacular.com/recipes/findByIngredients"

    params = {

        "ingredients": ingredients,

        "number": number,

        "apiKey": SPOONACULAR_API_KEY

    }

    response = requests.get(url, params=params)

    if response.status_code == 200:

        return response.json()

    else:

        print("❌ Error:", response.json())

        return []
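
# Example call (an added illustration): findByIngredients expects a
# comma-separated ingredient string, e.g.
#   get_recipes_from_spoonacular("apples,flour,sugar", number=3)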


# -----------------------------------

# 2. Get detailed nutritional info

# -----------------------------------

def get_recipe_nutrition(recipe_id):

    url = f"https://api.spoonacular.com/recipes/{recipe_id}/nutritionWidget.json"

    params = {"apiKey": SPOONACULAR_API_KEY}

    response = requests.get(url, params=params)

    if response.status_code == 200:

        return response.json()

    return {}


# -----------------------------------

# 3. Use OpenAI to generate recipe idea

# -----------------------------------

def generate_ai_recipe(ingredients):

    prompt = f"Suggest a creative recipe using these ingredients: {ingredients}. Include steps and a short description."

    

    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # text-davinci-003 and the Completion endpoint are retired
        messages=[{"role": "user", "content": prompt}],
        max_tokens=300,
        temperature=0.7
    )
    return response.choices[0].message.content.strip()


# -----------------------------------

# Example Run

# -----------------------------------

if __name__ == "__main__":

    user_ingredients = input("Enter ingredients (comma separated): ")

    

    print("\n🍲 Fetching recipe ideas from Spoonacular...\n")

    recipes = get_recipes_from_spoonacular(user_ingredients)

    

    recipe_list = []

    for r in recipes:

        nutrition = get_recipe_nutrition(r["id"])

        recipe_list.append({

            "Title": r["title"],

            "Used Ingredients": len(r["usedIngredients"]),

            "Missed Ingredients": len(r["missedIngredients"]),

            "Calories": nutrition.get("calories", "N/A"),

            "Carbs": nutrition.get("carbs", "N/A"),

            "Protein": nutrition.get("protein", "N/A"),

            "Fat": nutrition.get("fat", "N/A")

        })

    

    df = pd.DataFrame(recipe_list)

    print(df)

    

    print("\n🤖 AI Suggested Recipe:\n")

    ai_recipe = generate_ai_recipe(user_ingredients)

    print(ai_recipe)


Smart Resume Formatter

from docx import Document

from docx.shared import Pt

from fpdf import FPDF
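# Requires: pip install python-docx fpdf  (note: the package behind the "docx" import is python-docx)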


# ---------------------------

# 1. Format Resume into Word

# ---------------------------

def create_word_resume(data, filename="resume.docx"):

    doc = Document()

    

    # Title (Name)

    title = doc.add_paragraph(data["name"])

    title.style = doc.styles['Title']

    

    # Contact Info

    doc.add_paragraph(f'Email: {data["email"]} | Phone: {data["phone"]}')

    

    # Sections

    doc.add_heading('Summary', level=1)

    doc.add_paragraph(data["summary"])

    

    doc.add_heading('Experience', level=1)

    for job in data["experience"]:

        doc.add_paragraph(f"{job['role']} at {job['company']} ({job['years']})")

        doc.add_paragraph(job["details"], style="List Bullet")

    

    doc.add_heading('Education', level=1)

    for edu in data["education"]:

        doc.add_paragraph(f"{edu['degree']} - {edu['institution']} ({edu['year']})")

    

    doc.add_heading('Skills', level=1)

    doc.add_paragraph(", ".join(data["skills"]))

    

    doc.save(filename)

    print(f"✅ Word Resume saved as {filename}")



# ---------------------------

# 2. Format Resume into PDF

# ---------------------------

def create_pdf_resume(data, filename="resume.pdf"):

    pdf = FPDF()

    pdf.add_page()

    pdf.set_font("Arial", 'B', 16)

    

    # Title (Name)

    pdf.cell(200, 10, data["name"], ln=True, align="C")

    

    pdf.set_font("Arial", '', 12)

    pdf.cell(200, 10, f'Email: {data["email"]} | Phone: {data["phone"]}', ln=True, align="C")

    

    # Sections

    pdf.set_font("Arial", 'B', 14)

    pdf.cell(200, 10, "Summary", ln=True)

    pdf.set_font("Arial", '', 12)

    pdf.multi_cell(0, 10, data["summary"])

    

    pdf.set_font("Arial", 'B', 14)

    pdf.cell(200, 10, "Experience", ln=True)

    pdf.set_font("Arial", '', 12)

    for job in data["experience"]:

        pdf.multi_cell(0, 10, f"{job['role']} at {job['company']} ({job['years']})\n - {job['details']}")

    

    pdf.set_font("Arial", 'B', 14)

    pdf.cell(200, 10, "Education", ln=True)

    pdf.set_font("Arial", '', 12)

    for edu in data["education"]:

        pdf.cell(200, 10, f"{edu['degree']} - {edu['institution']} ({edu['year']})", ln=True)

    

    pdf.set_font("Arial", 'B', 14)

    pdf.cell(200, 10, "Skills", ln=True)

    pdf.set_font("Arial", '', 12)

    pdf.multi_cell(0, 10, ", ".join(data["skills"]))

    

    pdf.output(filename)

    print(f"✅ PDF Resume saved as {filename}")



# ---------------------------

# Example Data

# ---------------------------

resume_data = {

    "name": "John Doe",

    "email": "john.doe@email.com",

    "phone": "+1-234-567-890",

    "summary": "Passionate software engineer with 5+ years of experience in building scalable applications.",

    "experience": [

        {"role": "Backend Developer", "company": "TechCorp", "years": "2020-2023", "details": "Developed APIs and microservices using Python & Django."},

        {"role": "Software Engineer", "company": "CodeWorks", "years": "2017-2020", "details": "Worked on automation tools and optimized system performance."}

    ],

    "education": [

        {"degree": "B.Sc. Computer Science", "institution": "XYZ University", "year": "2017"}

    ],

    "skills": ["Python", "Django", "Flask", "SQL", "Docker", "AWS"]

}


# Run both functions

create_word_resume(resume_data)

create_pdf_resume(resume_data)


AI Workout Form Corrector

import cv2

import mediapipe as mp

import numpy as np
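# Requires: pip install opencv-python mediapipe numpy  (cv2 comes from opencv-python)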


mp_drawing = mp.solutions.drawing_utils

mp_pose = mp.solutions.pose


# -----------------------

# Calculate angle between 3 points

# -----------------------

def calculate_angle(a, b, c):

    a = np.array(a)  # First

    b = np.array(b)  # Mid

    c = np.array(c)  # End

    

    radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])

    angle = np.abs(radians*180.0/np.pi)

    

    if angle > 180.0:

        angle = 360 - angle

    return angle
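
# Quick sanity check (an added example, not in the original): three points
# forming a right angle at the middle point b should measure 90 degrees.
assert round(calculate_angle((0, 1), (0, 0), (1, 0)), 1) == 90.0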


# -----------------------

# Main workout tracker (Squats Example)

# -----------------------

cap = cv2.VideoCapture(0)


with mp_pose.Pose(min_detection_confidence=0.7, min_tracking_confidence=0.7) as pose:

    counter = 0

    stage = None

    

    while cap.isOpened():

        ret, frame = cap.read()

        if not ret:

            break

        

        # Recolor image

        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        image.flags.writeable = False

        

        # Make detection

        results = pose.process(image)

        

        # Recolor back to BGR

        image.flags.writeable = True

        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        

        try:

            landmarks = results.pose_landmarks.landmark

            

            # Get coordinates

            hip = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x,

                   landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]

            knee = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].x,

                    landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y]

            ankle = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x,

                     landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y]

            

            # Calculate angle

            angle = calculate_angle(hip, knee, ankle)

            

            # Visualize angle

            # Scale the normalized landmark coords to the actual frame size
            # (the original hardcoded 640x480, which misplaces text at other resolutions)
            h, w = image.shape[:2]
            cv2.putText(image, str(int(angle)),
                        tuple(np.multiply(knee, [w, h]).astype(int)),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA
                        )

            

            # Squat counter logic

            if angle > 160:

                stage = "up"

            if angle < 90 and stage == "up":

                stage = "down"

                counter += 1

                print(f"✅ Squat count: {counter}")

            

            # Feedback

            if angle < 70:

                feedback = "Too Low! Go Higher"

            elif 70 <= angle <= 100:

                feedback = "Perfect Depth ✅"

            else:

                feedback = "Stand Tall"

            

            cv2.putText(image, feedback, (50,100),

                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2, cv2.LINE_AA)

            

        except AttributeError:
            # No pose landmarks detected in this frame; skip angle/feedback
            pass

        

        # Render detections

        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,

                                  mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2),

                                  mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)

                                 )               

        

        cv2.imshow('AI Workout Form Corrector - Squats', image)

        

        if cv2.waitKey(10) & 0xFF == ord('q'):

            break

    

    cap.release()

    cv2.destroyAllWindows()


Voice Emotion Detector

import os

import librosa

import numpy as np

import sounddevice as sd

import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split

from sklearn.svm import SVC

from sklearn.preprocessing import LabelEncoder, StandardScaler

import pickle
import soundfile as sf  # replaces librosa.output.write_wav, removed in librosa 0.8
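# Requires: pip install librosa sounddevice soundfile scikit-learn matplotlib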


# -----------------------

# STEP 1: Feature Extraction

# -----------------------

def extract_features(file_path):

    y, sr = librosa.load(file_path, duration=3, offset=0.5)

    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)

    chroma = librosa.feature.chroma_stft(y=y, sr=sr)

    mel = librosa.feature.melspectrogram(y=y, sr=sr)

    

    # Take mean of each feature

    mfccs = np.mean(mfcc.T, axis=0)

    chroma = np.mean(chroma.T, axis=0)

    mel = np.mean(mel.T, axis=0)
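    # The hstack below concatenates 40 MFCCs + 12 chroma bins (librosa default)
    # + 128 mel bands (librosa default) into one 180-value vector per clip.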


    return np.hstack([mfccs, chroma, mel])


# -----------------------

# STEP 2: Training (Demo Dataset Simulation)

# -----------------------

def train_model():

    # Normally you would load a full dataset (RAVDESS, CREMA-D, etc.).
    # Here we simulate one with a few .wav files in a "dataset/" folder.

    

    # Emotion labels expected as file-name prefixes (e.g. angry_1.wav)
    emotions = ["angry", "happy", "sad", "neutral"]

    

    X, y = [], []

    dataset_path = "dataset"  # folder with wav files: angry1.wav, happy2.wav, etc.

    

    for file in os.listdir(dataset_path):
        if file.endswith(".wav"):
            label = file.split("_")[0]  # e.g., angry_1.wav → "angry"
            if label not in emotions:
                continue  # ignore files that don't carry a known label
            feature = extract_features(os.path.join(dataset_path, file))
            X.append(feature)
            y.append(label)

    

    X = np.array(X)

    y = np.array(y)

    

    # Encode labels

    encoder = LabelEncoder()

    y = encoder.fit_transform(y)

    

    scaler = StandardScaler()

    X = scaler.fit_transform(X)

    

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    

    model = SVC(kernel="linear", probability=True)

    model.fit(X_train, y_train)

    

    acc = model.score(X_test, y_test)

    print(f"Model trained with accuracy: {acc*100:.2f}%")

    

    # Save model

    with open("emotion_model.pkl", "wb") as f:

        pickle.dump((model, encoder, scaler), f)


# -----------------------

# STEP 3: Record & Predict

# -----------------------

def record_and_predict(duration=3, fs=22050):

    print("Recording...")

    recording = sd.rec(int(duration * fs), samplerate=fs, channels=1)

    sd.wait()

    print("Recording complete. Saving as temp.wav...")

    sf.write("temp.wav", recording.flatten(), fs)


    with open("emotion_model.pkl", "rb") as f:

        model, encoder, scaler = pickle.load(f)

    

    features = extract_features("temp.wav").reshape(1, -1)

    features = scaler.transform(features)

    pred = model.predict(features)[0]

    probas = model.predict_proba(features)[0]

    

    emotion = encoder.inverse_transform([pred])[0]

    print(f"Detected Emotion: {emotion}")

    

    # Plot probabilities

    plt.bar(encoder.classes_, probas)

    plt.title("Emotion Prediction Confidence")

    plt.show()


# -----------------------

# MAIN

# -----------------------

if __name__ == "__main__":

    if not os.path.exists("emotion_model.pkl"):

        print("Training model...")

        train_model()

    

    record_and_predict()