AI Whiteboard Digitizer

#!/usr/bin/env python3

"""

AI Whiteboard Digitizer (prototype)


Usage:

    python whiteboard_digitizer.py input_image.jpg


Outputs:

 - ocr_texts.txt          : OCR'd text lines (raw)

 - equations_latex.tex    : LaTeX for parseable math expressions

 - diagram.svg (or diagram.png) : vector-like rendering of detected lines/circles

 - several debug images in ./debug_*.png

"""


import sys

import os

import cv2

import numpy as np

from PIL import Image

import pytesseract

from sympy import sympify, latex

from sympy.core.sympify import SympifyError

import svgwrite



# If on Windows and tesseract is not in PATH, set path here (uncomment and adjust)

# pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"


# ---------- Utilities ----------

def ensure_dir(path):

    if not os.path.exists(path):

        os.makedirs(path)


# Preprocess: grayscale, denoise, adaptive threshold, deskew

def preprocess_image(img_bgr, max_dim=1600):

    # Resize to manageable size, keep aspect

    h, w = img_bgr.shape[:2]

    scale = min(1.0, float(max_dim) / max(h, w))

    if scale != 1.0:

        img_bgr = cv2.resize(img_bgr, (int(w*scale), int(h*scale)), interpolation=cv2.INTER_AREA)

    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)

    # Denoise

    gray = cv2.fastNlMeansDenoising(gray, None, 10, 7, 21)

    # Bilateral to preserve edges

    gray = cv2.bilateralFilter(gray, 9, 75, 75)

    # Adaptive threshold (whiteboard: dark text on light background)

    th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,

                               cv2.THRESH_BINARY_INV, 25, 12)

    # Deskew using the min-area rectangle fitted to all foreground pixels

    coords = np.column_stack(np.where(th > 0))

    if coords.shape[0] > 0:

        angle = cv2.minAreaRect(coords)[-1]

        # Correction of angle

        if angle < -45:

            angle = -(90 + angle)

        else:

            angle = -angle

        # Rotate

        (h2, w2) = gray.shape[:2]

        M = cv2.getRotationMatrix2D((w2//2, h2//2), angle, 1.0)

        gray = cv2.warpAffine(gray, M, (w2, h2), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)

        th = cv2.warpAffine(th, M, (w2, h2), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)

    return gray, th


# Find text boxes via morphological dilation + contours (MSER is an alternative)

def detect_text_regions(thresh_img, min_area=200, debug_out=None):

    # dilate to join letters into words/lines

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 5))

    dil = cv2.dilate(thresh_img, kernel, iterations=2)

    contours, _ = cv2.findContours(dil, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    boxes = []

    for cnt in contours:

        x, y, w, h = cv2.boundingRect(cnt)

        if w*h < min_area:

            continue

        # filter very tall or very wide noise

        if h < 10 or w < 20:

            continue

        boxes.append((x, y, w, h))

    # sort top to bottom, left to right

    boxes = sorted(boxes, key=lambda b: (b[1], b[0]))

    if debug_out is not None:

        vis = cv2.cvtColor(thresh_img, cv2.COLOR_GRAY2BGR)

        for (x,y,w,h) in boxes:

            cv2.rectangle(vis, (x,y), (x+w,y+h), (0,255,0), 2)

        cv2.imwrite(debug_out, vis)

    return boxes
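
# Tuning note: the wide 15x5 kernel joins characters horizontally into word/line
# blobs. If words on the same line still end up in separate boxes, widen the
# kernel (e.g., (25, 5)) or increase the dilation iterations.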


# OCR each region (use appropriate psm)

def ocr_regions(gray_img, boxes, ocr_lang='eng'):

    lines = []

    for (x, y, w, h) in boxes:

        pad = 4

        x0 = max(0, x-pad)

        y0 = max(0, y-pad)

        x1 = min(gray_img.shape[1], x+w+pad)

        y1 = min(gray_img.shape[0], y+h+pad)

        crop = gray_img[y0:y1, x0:x1]

        # increase contrast and invert if necessary

        # Convert to PIL for pytesseract

        pil = Image.fromarray(crop)

        # Tesseract config: treat as a single line or single block

        config = "--psm 7"  # treat as a single text line (good for equations on a line)

        text = pytesseract.image_to_string(pil, lang=ocr_lang, config=config)

        text = text.strip()

        if text:

            lines.append({'box': (x0,y0,x1,y1), 'text': text})

    return lines


# Heuristic to check if a line looks like an equation/expression

def looks_like_equation(s):

    # Accept digits, letters, common operators (= + - * / ^), and parentheses

    import re

    s2 = s.replace(' ', '')

    # Must have at least one operator or equal sign or variable

    if re.search(r'[=\+\-\*/\^]', s2):

        return True

    # Or something like 'lim', 'sin', 'cos' etc.

    if re.search(r'\b(sin|cos|tan|log|ln|lim|sqrt)\b', s.lower()):

        return True

    # Or presence of digits next to letters (like 2x or x2)

    if re.search(r'\d+[a-zA-Z]|[a-zA-Z]\d+', s):

        return True

    return False
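
# Illustrative behavior (comments only, not executed):
#   looks_like_equation("2x + 3 = 7")   -> True   (operator and '=' present)
#   looks_like_equation("sin(x)")       -> True   (known function keyword)
#   looks_like_equation("team meeting") -> False  (no operators, digits, or keywords)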


# Clean OCR text for sympy: replace common OCR artifacts

def clean_ocr_for_sympy(s):

    # Basic replacements; adapt as needed

    repl = {

        '×': '*',

        'X': 'x',

        '—': '-',

        '−': '-',

        '–': '-',

        '÷': '/',

        '’': "'",

        '‘': "'",

        '“': '"',

        '”': '"',

        'O': '0',  # risky: replaces every 'O'; drop this if uppercase-O variables are expected

    }

    out = s

    # Remove stray non-ascii except math symbols

    for k,v in repl.items():

        out = out.replace(k, v)

    # Convert caret exponents to Python syntax: x^2 -> x**2
    out = out.replace('^', '**')

    # Strip non-printable/control characters, keeping letters and math symbols
    out = ''.join(ch for ch in out if ch.isprintable())

    out = out.strip()

    return out


# Try to parse with sympy and produce latex

def parse_equation_to_latex(s):

    s_clean = clean_ocr_for_sympy(s)

    # If contains '=' treat as equation; else expression

    try:

        if '=' in s_clean:

            # sympy's Eq expects left and right; split on first '='

            left, right = s_clean.split('=', 1)

            eleft = sympify(left)

            eright = sympify(right)

            # Represent as a LaTeX equation
            latex_str = latex(eleft) + " = " + latex(eright)

        else:

            expr = sympify(s_clean)

            latex_str = latex(expr)

        return latex_str, None

    except SympifyError as e:

        return None, f"SympifyError: {e}"

    except Exception as e:

        return None, str(e)
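
# Illustrative result (comments only):
#   parse_equation_to_latex("x^2 + 2*x = 8")  ->  ("x^{2} + 2 x = 8", None)
# Note that sympify does not apply implicit multiplication by default, so OCR
# text like "2x" still fails to parse and is reported as UNPARSEABLE downstream.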


# Detect simple geometric primitives (lines via Hough, circles via HoughCircles)

def detect_shapes(gray_img, debug_prefix=None):

    # Use edge detection

    edges = cv2.Canny(gray_img, 50, 150, apertureSize=3)

    # Hough lines

    lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=80, minLineLength=50, maxLineGap=10)

    line_list = []

    if lines is not None:

        for l in lines:

            x1,y1,x2,y2 = l[0]

            line_list.append((int(x1),int(y1),int(x2),int(y2)))

    # Hough circles

    circles = None

    try:

        circ = cv2.HoughCircles(gray_img, cv2.HOUGH_GRADIENT, dp=1.2, minDist=30,

                                param1=100, param2=30, minRadius=8, maxRadius=200)

        if circ is not None:

            circ = np.round(circ[0, :]).astype("int")

            circles = [(int(x),int(y),int(r)) for (x,y,r) in circ]

    except Exception:

        circles = None

    # Save debug

    if debug_prefix:

        vis = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)

        for (x1,y1,x2,y2) in line_list:

            cv2.line(vis, (x1,y1), (x2,y2), (0,255,0), 2)

        if circles:

            for (x,y,r) in circles:

                cv2.circle(vis, (x,y), r, (0,0,255), 2)

        cv2.imwrite(f"{debug_prefix}_shapes.png", vis)

    return line_list, circles
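
# Tuning note: raise `threshold` / `minLineLength` in HoughLinesP if handwriting
# strokes are being picked up as lines; in HoughCircles, a higher `param2`
# (the accumulator threshold) yields fewer but more confident circles.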


# Render vector-style diagram to SVG using svgwrite

def render_vector_diagram(svg_path, image_size, lines, circles, boxes=None):

    w, h = image_size

    dwg = svgwrite.Drawing(svg_path, size=(w, h))

    # background white

    dwg.add(dwg.rect(insert=(0,0), size=(w,h), fill='white'))

    # optional: draw boxes for text regions (thin gray)

    if boxes:

        for (x0,y0,x1,y1) in boxes:

            dwg.add(dwg.rect(insert=(x0,y0), size=(x1-x0,y1-y0), fill='none', stroke='lightgray', stroke_width=1))

    # draw lines

    for (x1,y1,x2,y2) in lines:

        dwg.add(dwg.line(start=(x1,y1), end=(x2,y2), stroke=svgwrite.rgb(10, 10, 16, '%'), stroke_width=2))

    # draw circles

    if circles:

        for (x,y,r) in circles:

            dwg.add(dwg.circle(center=(x,y), r=r, stroke='black', fill='none', stroke_width=2))

    dwg.save()


# Main pipeline

def process_whiteboard_image(in_path, out_dir="wb_outputs"):

    ensure_dir(out_dir)

    img_bgr = cv2.imread(in_path)

    if img_bgr is None:

        raise FileNotFoundError(in_path)

    gray, th = preprocess_image(img_bgr)

    debug_pre = os.path.join(out_dir, "debug_preprocess.png")

    cv2.imwrite(debug_pre, gray)

    cv2.imwrite(os.path.join(out_dir, "debug_thresh.png"), th)


    # detect regions (text lines)

    boxes = detect_text_regions(th, debug_out=os.path.join(out_dir, "debug_boxes.png"))

    # OCR

    ocr_lines = ocr_regions(gray, boxes)

    # write OCR results

    ocr_txt_file = os.path.join(out_dir, "ocr_texts.txt")

    with open(ocr_txt_file, "w", encoding='utf-8') as f:

        for item in ocr_lines:

            f.write(item['text'] + "\n")

    print(f"[+] OCR lines saved to {ocr_txt_file}")


    # Filter likely equations

    eq_candidates = [it for it in ocr_lines if looks_like_equation(it['text'])]

    latex_results = []

    for it in eq_candidates:

        txt = it['text']

        latex_str, err = parse_equation_to_latex(txt)

        if latex_str:

            latex_results.append((txt, latex_str))

        else:

            latex_results.append((txt, f"UNPARSEABLE: {err}"))

    # write equations latex

    eq_file = os.path.join(out_dir, "equations_latex.tex")

    with open(eq_file, "w", encoding='utf-8') as f:

        f.write("% Generated LaTeX (auto) — review and correct as needed\n")

        for orig, out in latex_results:

            f.write("% OCR: " + orig.replace("\n"," ") + "\n")

            f.write(out + "\n\n")

    print(f"[+] Equation LaTeX saved to {eq_file}")


    # shape detection

    lines, circles = detect_shapes(gray, debug_prefix=os.path.join(out_dir, "debug"))

    svg_path = os.path.join(out_dir, "diagram.svg")

    # convert boxes to x0,y0,x1,y1 format for svg (optional)

    bboxes = [(x,y,x+w,y+h) for (x,y,w,h) in boxes]

    render_vector_diagram(svg_path, (gray.shape[1], gray.shape[0]), lines, circles, boxes=bboxes)

    print(f"[+] Diagram SVG saved to {svg_path}")


    # Also produce a PNG overlay visualization (drawn with OpenCV)

    overlay = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    for (x1,y1,x2,y2) in lines:

        cv2.line(overlay, (x1,y1), (x2,y2), (0,255,0), 2)

    if circles:

        for (x,y,r) in circles:

            cv2.circle(overlay, (x,y), r, (0,0,255), 2)

    for (x0,y0,x1,y1) in bboxes:

        cv2.rectangle(overlay, (x0,y0), (x1,y1), (255,0,0), 1)

    cv2.imwrite(os.path.join(out_dir, "overlay_debug.png"), overlay)

    print(f"[+] Debug overlay saved to {os.path.join(out_dir, 'overlay_debug.png')}")

    return {

        "ocr_lines": ocr_lines,

        "equations": latex_results,

        "svg": svg_path,

        "debug": out_dir

    }


# --------- CLI ----------

if __name__ == "__main__":

    if len(sys.argv) < 2:

        print("Usage: python whiteboard_digitizer.py input_image.jpg")

        sys.exit(1)

    inp = sys.argv[1]

    out = "wb_outputs"

    res = process_whiteboard_image(inp, out_dir=out)

    print("Done. Outputs in:", out)


Virtual AI Travel Planner

import os

import json

import time

import requests

import pandas as pd

import streamlit as st

from datetime import date, timedelta


# -----------------------------

# Setup: API Keys from env vars

# -----------------------------

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")

GOOGLE_MAPS_API_KEY = os.getenv("GOOGLE_MAPS_API_KEY", "")


# -----------------------------

# Google Places helpers

# -----------------------------

PLACES_TEXT_SEARCH_URL = "https://maps.googleapis.com/maps/api/place/textsearch/json"


def places_text_search(query, key, max_results=20):

    """

    Simple wrapper for Google Places 'Text Search' API.

    We page through results (up to ~60) but cap by max_results.

    """

    if not key:

        return []


    params = {

        "query": query,

        "key": key,

    }

    out = []

    next_page_token = None

    while True:

        if next_page_token:

            params["pagetoken"] = next_page_token

            # Google requires short delay before using next_page_token

            time.sleep(2)


        r = requests.get(PLACES_TEXT_SEARCH_URL, params=params, timeout=30)

        if r.status_code != 200:

            break

        data = r.json()

        out.extend(data.get("results", []))


        next_page_token = data.get("next_page_token")

        if not next_page_token or len(out) >= max_results:

            break


    return out[:max_results]
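
# Illustrative usage (requires a valid key with the Places API enabled):
#   results = places_text_search("hotels in Tokyo, Japan", GOOGLE_MAPS_API_KEY, max_results=5)
# Each result dict carries keys such as "name", "rating", "user_ratings_total",
# "formatted_address", and "geometry" (consumed by the fetch_* helpers below).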



def fetch_hotels(destination: str, max_results=20):

    """

    Fetch hotels around destination. Uses price_level (0-4) and rating when available.

    """

    results = places_text_search(f"hotels in {destination}", GOOGLE_MAPS_API_KEY, max_results=max_results)

    rows = []

    for r in results:

        rows.append({

            "name": r.get("name"),

            "rating": r.get("rating"),

            "reviews": r.get("user_ratings_total"),

            "price_level(0-4)": r.get("price_level"),

            "address": r.get("formatted_address"),

            "lat": r.get("geometry", {}).get("location", {}).get("lat"),

            "lng": r.get("geometry", {}).get("location", {}).get("lng")

        })

    df = pd.DataFrame(rows)

    # sort: rating desc, then reviews desc

    if not df.empty:

        df = df.sort_values(by=["rating", "reviews"], ascending=[False, False], na_position="last")

    return df



def fetch_attractions(destination: str, max_results=20):

    """

    Fetch attractions/POIs.

    """

    query = f"top attractions in {destination}"

    results = places_text_search(query, GOOGLE_MAPS_API_KEY, max_results=max_results)

    rows = []

    for r in results:

        rows.append({

            "name": r.get("name"),

            "category": ", ".join(r.get("types", [])),

            "rating": r.get("rating"),

            "reviews": r.get("user_ratings_total"),

            "address": r.get("formatted_address"),

            "lat": r.get("geometry", {}).get("location", {}).get("lat"),

            "lng": r.get("geometry", {}).get("location", {}).get("lng")

        })

    df = pd.DataFrame(rows)

    if not df.empty:

        df = df.sort_values(by=["rating", "reviews"], ascending=[False, False], na_position="last")

    return df


# -----------------------------

# Budget helper

# -----------------------------

def simple_budget_breakdown(total_budget, days, travelers=1):

    """

    Very coarse split of a trip budget (adjust as needed).

    Returns per-trip & per-day guide.

    """

    total_budget = float(total_budget)

    days = max(1, int(days))

    travelers = max(1, int(travelers))


    # Example split: 45% stay, 30% food, 15% local transport, 10% activities

    stay = total_budget * 0.45

    food = total_budget * 0.30

    transport = total_budget * 0.15

    activities = total_budget * 0.10


    per_day = {

        "Stay": round(stay / days, 2),

        "Food": round(food / days, 2),

        "Local Transport": round(transport / days, 2),

        "Activities": round(activities / days, 2),

        "Total/day": round(total_budget / days, 2)

    }

    per_person = round(total_budget / travelers, 2)

    return per_day, per_person
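
# Worked example: simple_budget_breakdown(1000, 5, travelers=2) yields
#   per_day    -> {"Stay": 90.0, "Food": 60.0, "Local Transport": 30.0,
#                  "Activities": 20.0, "Total/day": 200.0}
#   per_person -> 500.0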


# -----------------------------

# OpenAI itinerary helper

# -----------------------------

def generate_itinerary_with_openai(destination, start_date, days, interests, budget_hint, travelers):

    """

    Calls OpenAI Chat Completions to generate an itinerary.

    Expects OPENAI_API_KEY in env. Uses the Chat Completions HTTP endpoint.

    """

    if not OPENAI_API_KEY:

        return "OpenAI API key not set. Please set OPENAI_API_KEY in your environment."


    # Build system & user prompts

    sys_prompt = (

        "You are a helpful travel planner. Create practical, walkable day-by-day itineraries with morning, afternoon, and evening blocks, "

        "add short logistic hints, and keep it realistic for the location. Keep each day concise and bulleted."

    )

    user_prompt = (

        f"Destination: {destination}\n"

        f"Start date: {start_date}\n"

        f"Trip length: {days} days\n"

        f"Travelers: {travelers}\n"

        f"Interests: {', '.join(interests) if interests else 'general sightseeing'}\n"

        f"Budget guidance: {budget_hint}\n\n"

        "Please produce:\n"

        "1) A short overview paragraph (tone: friendly & practical)\n"

        "2) Day-by-day plan, each day with 3 bullet sections: Morning / Afternoon / Evening\n"

        "3) A compact list of neighborhood/area suggestions for dining\n"

        "4) 6 packing tips relevant to weather & activities\n"

    )


    # Minimal direct HTTP call to OpenAI Chat Completions (compatible with current API).

    url = "https://api.openai.com/v1/chat/completions"

    headers = {

        "Authorization": f"Bearer {OPENAI_API_KEY}",

        "Content-Type": "application/json",

    }

    payload = {

        "model": "gpt-4o-mini",   # pick a chat-capable model available to your account

        "messages": [

            {"role": "system", "content": sys_prompt},

            {"role": "user", "content": user_prompt}

        ],

        "temperature": 0.7,

        "max_tokens": 1200

    }


    try:

        resp = requests.post(url, headers=headers, data=json.dumps(payload), timeout=60)

        resp.raise_for_status()

        data = resp.json()

        return data["choices"][0]["message"]["content"].strip()

    except Exception as e:

        return f"Failed to generate itinerary: {e}"


# -----------------------------

# Streamlit UI

# -----------------------------

st.set_page_config(page_title="Virtual AI Travel Planner", page_icon="🧭", layout="wide")


st.title("🧭 Virtual AI Travel Planner")

st.caption("Enter your destination & budget → get hotels, attractions, and an AI-built itinerary.\n\n*Educational demo. Always verify details before booking.*")


with st.sidebar:

    st.header("🔑 API Keys")

    st.write("Set these as environment variables before running:\n- `OPENAI_API_KEY`\n- `GOOGLE_MAPS_API_KEY`")

    st.write("Detected:")

    st.code(f"OPENAI_API_KEY set: {bool(OPENAI_API_KEY)}\nGOOGLE_MAPS_API_KEY set: {bool(GOOGLE_MAPS_API_KEY)}")


col1, col2, col3 = st.columns([1.2,1,1])


with col1:

    destination = st.text_input("Destination (city/country)", placeholder="e.g., Tokyo, Japan")

    start = st.date_input("Start date", value=date.today() + timedelta(days=14))

    days = st.number_input("Trip length (days)", min_value=1, max_value=21, value=5, step=1)


with col2:

    budget = st.number_input("Total budget (your currency)", min_value=0.0, value=1000.0, step=100.0, help="Rough trip budget total")

    travelers = st.number_input("Travelers", min_value=1, value=2, step=1)

    interests = st.multiselect("Interests", [

        "Food", "Museums", "Nature", "Architecture", "Shopping", "Nightlife", "Adventure", "History", "Beaches", "Hiking"

    ], default=["Food", "Museums"])


with col3:

    max_hotels = st.slider("Max hotels to fetch", 5, 40, 15)

    max_attractions = st.slider("Max attractions to fetch", 5, 40, 20)

    run = st.button("✨ Plan my trip")


if run:

    if not destination.strip():

        st.error("Please enter a destination.")

        st.stop()


    # Budget breakdown

    per_day, per_person = simple_budget_breakdown(budget, days, travelers)

    st.subheader("💸 Budget Guide")

    c1, c2 = st.columns(2)

    with c1:

        st.write("**Per-day guide** (rough):")

        st.table(pd.DataFrame([per_day]))

    with c2:

        st.metric("Per-person total", f"{per_person:.2f}")


    # Google results

    st.subheader("🏨 Hotels (Google Places)")

    hotels_df = fetch_hotels(destination, max_results=max_hotels)

    if hotels_df.empty:

        st.info("No hotel data (check your Google API key & billing).")

    else:

        st.dataframe(hotels_df, use_container_width=True)


    st.subheader("📍 Attractions / Things to do (Google Places)")

    attractions_df = fetch_attractions(destination, max_results=max_attractions)

    if attractions_df.empty:

        st.info("No attractions data (check your Google API key & billing).")

    else:

        st.dataframe(attractions_df, use_container_width=True)


    # AI itinerary

    st.subheader("🧠 AI Itinerary")

    budget_hint = f"Total budget approx {budget} for {travelers} travelers over {days} days. Per-day guide: {per_day}."

    itinerary = generate_itinerary_with_openai(destination, start, days, interests, budget_hint, travelers)

    st.write(itinerary)


    # Optional: CSV downloads

    st.download_button(

        "⬇️ Download hotels CSV",

        data=hotels_df.to_csv(index=False).encode("utf-8"),

        file_name=f"{destination.replace(' ','_').lower()}_hotels.csv",

        mime="text/csv"

    )

    st.download_button(

        "⬇️ Download attractions CSV",

        data=attractions_df.to_csv(index=False).encode("utf-8"),

        file_name=f"{destination.replace(' ','_').lower()}_attractions.csv",

        mime="text/csv"

    )


    st.success("Done! Scroll up to view tables and itinerary. 🌍✈️")

else:

    st.info("Fill the form and click **Plan my trip**.")


Fake News Image Detector

import tkinter as tk

from tkinter import filedialog, messagebox

from PIL import Image, ImageTk

import exifread

import requests

import io


# -----------------------

# Metadata Extraction

# -----------------------

def extract_metadata(image_path):

    with open(image_path, 'rb') as f:

        tags = exifread.process_file(f)

    return {tag: str(tags[tag]) for tag in tags.keys()}
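
# Useful clues to inspect (when present): "Image DateTime", "Image Software"
# (editing tools often stamp this), and "GPS GPSLatitude"/"GPS GPSLongitude".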


# -----------------------

# Reverse Search (TinEye / Google)

# -----------------------

def reverse_search(image_path):

    # Normally we would use an API like TinEye or Google Custom Search.
    # For demo, we upload the image to imgbb and return a shareable link
    # that can be pasted into a reverse-image-search engine.

    try:

        with open(image_path, 'rb') as f:

            files = {"file": f}

            r = requests.post("https://api.imgbb.com/1/upload",

                              files=files,

                              params={"key": "YOUR_IMGBB_API_KEY"})

            if r.status_code == 200:

                return r.json()["data"]["url"]

    except Exception as e:

        return f"Reverse search not available: {e}"

    return "Reverse search failed."


# -----------------------

# GUI Functions

# -----------------------

def open_image():

    file_path = filedialog.askopenfilename(filetypes=[("Image Files", "*.jpg *.jpeg *.png")])

    if not file_path:

        return


    # Show image

    img = Image.open(file_path)

    img.thumbnail((250, 250))

    img_tk = ImageTk.PhotoImage(img)

    lbl_image.config(image=img_tk)

    lbl_image.image = img_tk


    # Metadata

    metadata = extract_metadata(file_path)

    txt_metadata.delete(1.0, tk.END)

    if metadata:

        for k, v in metadata.items():

            txt_metadata.insert(tk.END, f"{k}: {v}\n")

    else:

        txt_metadata.insert(tk.END, "No metadata found.\n")


    # Reverse search (optional)

    link = reverse_search(file_path)

    txt_metadata.insert(tk.END, f"\n🔍 Reverse Search Hint: {link}\n")


# -----------------------

# Main App

# -----------------------

root = tk.Tk()

root.title("📰 Fake News Image Detector")

root.geometry("600x500")


frame_top = tk.Frame(root)

frame_top.pack(pady=10)


btn_upload = tk.Button(frame_top, text="📤 Upload Image", command=open_image, font=("Arial", 12, "bold"))

btn_upload.pack()


lbl_image = tk.Label(root)

lbl_image.pack(pady=10)


lbl_meta = tk.Label(root, text="Image Metadata & Clues:", font=("Arial", 12, "bold"))

lbl_meta.pack()


txt_metadata = tk.Text(root, wrap=tk.WORD, width=70, height=15)

txt_metadata.pack(pady=10)


root.mainloop()


Smart Parking System Simulator Pro

import tkinter as tk

from tkinter import messagebox

import sqlite3

import random


# -----------------------

# Database Setup

# -----------------------

def init_db():

    conn = sqlite3.connect("parking.db")

    cur = conn.cursor()

    cur.execute(

        """CREATE TABLE IF NOT EXISTS parking_slots (

                id INTEGER PRIMARY KEY,

                slot_number TEXT UNIQUE,

                status TEXT

            )"""

    )


    # Initialize 10 slots if not exist

    cur.execute("SELECT COUNT(*) FROM parking_slots")

    count = cur.fetchone()[0]

    if count == 0:

        for i in range(1, 11):

            cur.execute("INSERT INTO parking_slots (slot_number, status) VALUES (?, ?)",

                        (f"SLOT-{i}", "Free"))

    conn.commit()

    conn.close()
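
# parking.db then holds one row per slot, e.g.:
#   id=1, slot_number="SLOT-1", status="Free"  ... through SLOT-10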


# -----------------------

# Database Functions

# -----------------------

def get_slots():

    conn = sqlite3.connect("parking.db")

    cur = conn.cursor()

    cur.execute("SELECT * FROM parking_slots")

    slots = cur.fetchall()

    conn.close()

    return slots


def update_slot(slot_id, status):

    conn = sqlite3.connect("parking.db")

    cur = conn.cursor()

    cur.execute("UPDATE parking_slots SET status=? WHERE id=?", (status, slot_id))

    conn.commit()

    conn.close()


# -----------------------

# GUI Functions

# -----------------------

def refresh_slots():

    for widget in frame_slots.winfo_children():

        widget.destroy()


    slots = get_slots()

    for slot in slots:

        slot_id, slot_number, status = slot

        color = "green" if status == "Free" else "red"

        btn = tk.Button(frame_slots, text=f"{slot_number}\n{status}",

                        bg=color, fg="white", width=12, height=3,

                        command=lambda s=slot: toggle_slot(s))

        btn.pack(side=tk.LEFT, padx=5, pady=5)


def toggle_slot(slot):

    slot_id, slot_number, status = slot

    if status == "Free":

        update_slot(slot_id, "Booked")

        messagebox.showinfo("Booked", f"You booked {slot_number}")

    else:

        update_slot(slot_id, "Free")

        messagebox.showinfo("Freed", f"You freed {slot_number}")

    refresh_slots()


def random_update():

    slots = get_slots()

    random_slot = random.choice(slots)

    slot_id, slot_number, status = random_slot

    new_status = "Free" if status == "Booked" else "Booked"

    update_slot(slot_id, new_status)

    refresh_slots()

    root.after(5000, random_update)  # auto change every 5s


# -----------------------

# Main App

# -----------------------

if __name__ == "__main__":

    init_db()


    root = tk.Tk()

    root.title("Smart Parking System Simulator")

    root.geometry("800x400")


    tk.Label(root, text="🚗 Smart Parking System Simulator", font=("Arial", 16, "bold")).pack(pady=10)


    frame_slots = tk.Frame(root)

    frame_slots.pack(pady=20)


    refresh_slots()


    # Auto slot updates (simulate cars parking)

    root.after(5000, random_update)


    root.mainloop()


AI Stock Predictor (Demo)

An AI Stock Predictor (Demo) showcases ML + finance while making clear that it is for educational purposes only.

import yfinance as yf

import pandas as pd

import numpy as np

import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split

from sklearn.linear_model import LinearRegression

from sklearn.metrics import mean_squared_error


# -------------------------

# 1. Fetch Stock Data

# -------------------------

def get_stock_data(ticker="AAPL", period="1y"):
    data = yf.download(ticker, period=period)
    # Newer yfinance releases return MultiIndex columns even for a single
    # ticker; flatten so data["Close"] behaves as a plain Series
    if isinstance(data.columns, pd.MultiIndex):
        data.columns = data.columns.get_level_values(0)
    return data


# -------------------------

# 2. Feature Engineering

# -------------------------

def prepare_data(data):

    data["Prediction"] = data["Close"].shift(-30)  # predict 30 days ahead

    X = np.array(data[["Close"]])

    X = X[:-30]

    y = np.array(data["Prediction"])

    y = y[:-30]

    return X, y
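
# With a 1-year daily history (~252 trading days), X and y each have ~222 rows:
# X[i] is the close price on day i, and y[i] is the close 30 trading days later.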


# -------------------------

# 3. Train Model

# -------------------------

def train_model(X, y):

    X_train, X_test, y_train, y_test = train_test_split(

        X, y, test_size=0.2, random_state=42

    )

    model = LinearRegression()

    model.fit(X_train, y_train)

    preds = model.predict(X_test)

    mse = mean_squared_error(y_test, preds)

    return model, mse, X_test, y_test, preds


# -------------------------

# 4. Forecast Future Prices

# -------------------------

def forecast(model, data):

    X_forecast = np.array(data[["Close"]])[-30:]

    forecast_pred = model.predict(X_forecast)

    return forecast_pred


# -------------------------

# 5. Visualize Results

# -------------------------

def plot_results(data, forecast_pred):

    plt.figure(figsize=(12, 6))

    data["Close"].plot(label="Actual Close Price")

    forecast_index = range(len(data), len(data) + 30)

    plt.plot(forecast_index, forecast_pred, label="Predicted Next 30 Days", color="red")

    plt.legend()

    plt.title("AI Stock Price Predictor (Demo)")

    plt.xlabel("Days")

    plt.ylabel("Price (USD)")

    plt.show()


# -------------------------

# Main

# -------------------------

if __name__ == "__main__":

    ticker = input("Enter stock ticker (default AAPL): ") or "AAPL"

    data = get_stock_data(ticker)

    print(data.tail())


    X, y = prepare_data(data)

    model, mse, X_test, y_test, preds = train_model(X, y)


    print(f" Model trained with MSE: {mse:.2f}")


    forecast_pred = forecast(model, data)


    plot_results(data, forecast_pred)


Automatic Meeting Notes Generator

import speech_recognition as sr

from transformers import pipeline

from fpdf import FPDF


# -------------------------

# 1. Record or load audio

# -------------------------

def transcribe_audio(audio_file=None, duration=30):

    recognizer = sr.Recognizer()

    if audio_file:

        with sr.AudioFile(audio_file) as source:

            audio = recognizer.record(source)

    else:

        with sr.Microphone() as source:

            print("🎙️ Recording meeting... Speak now.")

            audio = recognizer.listen(source, phrase_time_limit=duration)

    

    try:

        print("🔎 Transcribing...")

        return recognizer.recognize_google(audio)

    except sr.UnknownValueError:

        return "Could not understand audio."

    except sr.RequestError:

        return "API unavailable."


# -------------------------

# 2. Summarize transcript

# -------------------------

def summarize_text(text):

    summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

    summary = summarizer(text, max_length=150, min_length=50, do_sample=False)

    return summary[0]['summary_text']
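
# Note: bart-large-cnn accepts roughly 1024 tokens of input. For long meetings,
# a common workaround is to split the transcript into chunks, summarize each
# chunk, and then summarize the concatenated chunk summaries.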


# -------------------------

# 3. Save notes to PDF

# -------------------------

def save_to_pdf(transcript, summary, filename="meeting_notes.pdf"):

    pdf = FPDF()

    pdf.add_page()

    pdf.set_font("Arial", size=12)


    pdf.multi_cell(0, 10, " Meeting Transcript:\n" + transcript + "\n\n")

    pdf.multi_cell(0, 10, " Meeting Summary:\n" + summary)


    pdf.output(filename)

    print(f"✅ Notes saved as {filename}")


# -------------------------

# Main

# -------------------------

if __name__ == "__main__":

    # Record live OR use existing audio file (WAV recommended)

    transcript = transcribe_audio(audio_file=None, duration=20)

    print("\n Transcript:\n", transcript)


    if transcript and len(transcript) > 50:

        summary = summarize_text(transcript)

        print("\n Summary:\n", summary)

        save_to_pdf(transcript, summary)

    else:

        print(" Transcript too short to summarize.")


AI-Based Recipe Generator

import requests

import pandas as pd

import openai


#  Replace with your API keys

SPOONACULAR_API_KEY = "your_spoonacular_api_key"

OPENAI_API_KEY = "your_openai_api_key"


openai.api_key = OPENAI_API_KEY


# -----------------------------------

# 1. Get recipes from Spoonacular API

# -----------------------------------

def get_recipes_from_spoonacular(ingredients, number=5):

    url = f"https://api.spoonacular.com/recipes/findByIngredients"

    params = {

        "ingredients": ingredients,

        "number": number,

        "apiKey": SPOONACULAR_API_KEY

    }

    response = requests.get(url, params=params)

    if response.status_code == 200:

        return response.json()

    else:

        print("❌ Error:", response.json())

        return []


# -----------------------------------

# 2. Get detailed nutritional info

# -----------------------------------

def get_recipe_nutrition(recipe_id):

    url = f"https://api.spoonacular.com/recipes/{recipe_id}/nutritionWidget.json"

    params = {"apiKey": SPOONACULAR_API_KEY}

    response = requests.get(url, params=params)

    if response.status_code == 200:

        return response.json()

    return {}


# -----------------------------------

# 3. Use OpenAI to generate recipe idea

# -----------------------------------

def generate_ai_recipe(ingredients):

    prompt = f"Suggest a creative recipe using these ingredients: {ingredients}. Include steps and a short description."

    

    # text-davinci-003 and the legacy Completion endpoint are retired; use
    # Chat Completions with a chat-capable model available to your account.
    response = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=300,
        temperature=0.7
    )
    return response.choices[0].message.content.strip()


# -----------------------------------

# Example Run

# -----------------------------------

if __name__ == "__main__":

    user_ingredients = input("Enter ingredients (comma separated): ")

    

    print("\n🍲 Fetching recipe ideas from Spoonacular...\n")

    recipes = get_recipes_from_spoonacular(user_ingredients)

    

    recipe_list = []

    for r in recipes:

        nutrition = get_recipe_nutrition(r["id"])

        recipe_list.append({

            "Title": r["title"],

            "Used Ingredients": len(r["usedIngredients"]),

            "Missed Ingredients": len(r["missedIngredients"]),

            "Calories": nutrition.get("calories", "N/A"),

            "Carbs": nutrition.get("carbs", "N/A"),

            "Protein": nutrition.get("protein", "N/A"),

            "Fat": nutrition.get("fat", "N/A")

        })

    

    df = pd.DataFrame(recipe_list)

    print(df)

    

    print("\n🤖 AI Suggested Recipe:\n")

    ai_recipe = generate_ai_recipe(user_ingredients)

    print(ai_recipe)


Smart Resume Formatter

from docx import Document

from docx.shared import Pt

from fpdf import FPDF


# ---------------------------

# 1. Format Resume into Word

# ---------------------------

def create_word_resume(data, filename="resume.docx"):

    doc = Document()

    

    # Title (Name)

    title = doc.add_paragraph(data["name"])

    title.style = doc.styles['Title']

    

    # Contact Info

    doc.add_paragraph(f'Email: {data["email"]} | Phone: {data["phone"]}')

    

    # Sections

    doc.add_heading('Summary', level=1)

    doc.add_paragraph(data["summary"])

    

    doc.add_heading('Experience', level=1)

    for job in data["experience"]:

        doc.add_paragraph(f"{job['role']} at {job['company']} ({job['years']})")

        doc.add_paragraph(job["details"], style="List Bullet")

    

    doc.add_heading('Education', level=1)

    for edu in data["education"]:

        doc.add_paragraph(f"{edu['degree']} - {edu['institution']} ({edu['year']})")

    

    doc.add_heading('Skills', level=1)

    doc.add_paragraph(", ".join(data["skills"]))

    

    doc.save(filename)

    print(f"✅ Word Resume saved as {filename}")



# ---------------------------

# 2. Format Resume into PDF

# ---------------------------

def create_pdf_resume(data, filename="resume.pdf"):

    pdf = FPDF()

    pdf.add_page()

    pdf.set_font("Arial", 'B', 16)

    

    # Title (Name)

    pdf.cell(200, 10, data["name"], ln=True, align="C")

    

    pdf.set_font("Arial", '', 12)

    pdf.cell(200, 10, f'Email: {data["email"]} | Phone: {data["phone"]}', ln=True, align="C")

    

    # Sections

    pdf.set_font("Arial", 'B', 14)

    pdf.cell(200, 10, "Summary", ln=True)

    pdf.set_font("Arial", '', 12)

    pdf.multi_cell(0, 10, data["summary"])

    

    pdf.set_font("Arial", 'B', 14)

    pdf.cell(200, 10, "Experience", ln=True)

    pdf.set_font("Arial", '', 12)

    for job in data["experience"]:

        pdf.multi_cell(0, 10, f"{job['role']} at {job['company']} ({job['years']})\n - {job['details']}")

    

    pdf.set_font("Arial", 'B', 14)

    pdf.cell(200, 10, "Education", ln=True)

    pdf.set_font("Arial", '', 12)

    for edu in data["education"]:

        pdf.cell(200, 10, f"{edu['degree']} - {edu['institution']} ({edu['year']})", ln=True)

    

    pdf.set_font("Arial", 'B', 14)

    pdf.cell(200, 10, "Skills", ln=True)

    pdf.set_font("Arial", '', 12)

    pdf.multi_cell(0, 10, ", ".join(data["skills"]))

    

    pdf.output(filename)

    print(f"✅ PDF Resume saved as {filename}")



# ---------------------------

# Example Data

# ---------------------------

resume_data = {

    "name": "John Doe",

    "email": "john.doe@email.com",

    "phone": "+1-234-567-890",

    "summary": "Passionate software engineer with 5+ years of experience in building scalable applications.",

    "experience": [

        {"role": "Backend Developer", "company": "TechCorp", "years": "2020-2023", "details": "Developed APIs and microservices using Python & Django."},

        {"role": "Software Engineer", "company": "CodeWorks", "years": "2017-2020", "details": "Worked on automation tools and optimized system performance."}

    ],

    "education": [

        {"degree": "B.Sc. Computer Science", "institution": "XYZ University", "year": "2017"}

    ],

    "skills": ["Python", "Django", "Flask", "SQL", "Docker", "AWS"]

}


# Run both functions

create_word_resume(resume_data)

create_pdf_resume(resume_data)


AI Workout Form Corrector

import cv2

import mediapipe as mp

import numpy as np


mp_drawing = mp.solutions.drawing_utils

mp_pose = mp.solutions.pose


# -----------------------

# Calculate angle between 3 points

# -----------------------

def calculate_angle(a, b, c):

    a = np.array(a)  # First

    b = np.array(b)  # Mid

    c = np.array(c)  # End

    

    radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])

    angle = np.abs(radians*180.0/np.pi)

    

    if angle > 180.0:

        angle = 360 - angle

    return angle
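
# Worked example: with hip=(0, 0), knee=(0, 1), ankle=(1, 1) the knee joint
# forms a right angle, and calculate_angle((0, 0), (0, 1), (1, 1)) returns 90.0.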


# -----------------------

# Main workout tracker (Squats Example)

# -----------------------

cap = cv2.VideoCapture(0)


with mp_pose.Pose(min_detection_confidence=0.7, min_tracking_confidence=0.7) as pose:

    counter = 0

    stage = None

    

    while cap.isOpened():

        ret, frame = cap.read()

        if not ret:

            break

        

        # Recolor image

        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        image.flags.writeable = False

        

        # Make detection

        results = pose.process(image)

        

        # Recolor back to BGR

        image.flags.writeable = True

        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        

        try:

            landmarks = results.pose_landmarks.landmark

            

            # Get coordinates

            hip = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x,

                   landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]

            knee = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].x,

                    landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y]

            ankle = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x,

                     landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y]

            

            # Calculate angle

            angle = calculate_angle(hip, knee, ankle)

            

            # Visualize angle (landmarks are normalized; scaled here for a
            # 640x480 frame, so use the actual frame size if yours differs)
            cv2.putText(image, str(int(angle)),
                        tuple(np.multiply(knee, [640, 480]).astype(int)),

                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA

                        )

            

            # Squat counter logic

            if angle > 160:

                stage = "up"

            if angle < 90 and stage == "up":

                stage = "down"

                counter += 1

                print(f"✅ Squat count: {counter}")

            

            # Feedback

            if angle < 70:

                feedback = "Too Low! Go Higher"

            elif 70 <= angle <= 100:

                feedback = "Perfect Depth ✅"

            else:

                feedback = "Stand Tall"

            

            cv2.putText(image, feedback, (50,100),

                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2, cv2.LINE_AA)

            

        except AttributeError:
            # No pose landmarks detected in this frame; skip the angle overlay
            pass

        

        # Render detections

        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,

                                  mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2),

                                  mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)

                                 )               

        

        cv2.imshow('AI Workout Form Corrector - Squats', image)

        

        if cv2.waitKey(10) & 0xFF == ord('q'):

            break

    

    cap.release()

    cv2.destroyAllWindows()


Voice Emotion Detector

import os

import librosa

import numpy as np

import sounddevice as sd
import soundfile as sf

import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split

from sklearn.svm import SVC

from sklearn.preprocessing import LabelEncoder, StandardScaler

import pickle


# -----------------------

# STEP 1: Feature Extraction

# -----------------------

def extract_features(file_path):

    y, sr = librosa.load(file_path, duration=3, offset=0.5)

    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)

    chroma = librosa.feature.chroma_stft(y=y, sr=sr)

    mel = librosa.feature.melspectrogram(y=y, sr=sr)

    

    # Take mean of each feature

    mfccs = np.mean(mfcc.T, axis=0)

    chroma = np.mean(chroma.T, axis=0)

    mel = np.mean(mel.T, axis=0)


    return np.hstack([mfccs, chroma, mel])
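
# With librosa's defaults (12 chroma bins, 128 mel bands), each clip becomes a
# fixed-length vector of 40 + 12 + 128 = 180 values, regardless of duration.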


# -----------------------

# STEP 2: Training (Demo Dataset Simulation)

# -----------------------

def train_model():

    # Normally, load a dataset (RAVDESS, CREMA-D etc.)

    # Here, we'll simulate with a few .wav files in a "dataset/" folder

    

    # Expected labels, derived from filenames: angry, happy, sad, neutral

    

    X, y = [], []

    dataset_path = "dataset"  # folder with wav files: angry1.wav, happy2.wav, etc.

    

    for file in os.listdir(dataset_path):

        if file.endswith(".wav"):

            label = file.split("_")[0]  # e.g., angry_1.wav → "angry"

            feature = extract_features(os.path.join(dataset_path, file))

            X.append(feature)

            y.append(label)

    

    X = np.array(X)

    y = np.array(y)

    

    # Encode labels

    encoder = LabelEncoder()

    y = encoder.fit_transform(y)

    

    scaler = StandardScaler()

    X = scaler.fit_transform(X)

    

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    

    model = SVC(kernel="linear", probability=True)

    model.fit(X_train, y_train)

    

    acc = model.score(X_test, y_test)

    print(f"Model trained with accuracy: {acc*100:.2f}%")

    

    # Save model

    with open("emotion_model.pkl", "wb") as f:

        pickle.dump((model, encoder, scaler), f)


# -----------------------

# STEP 3: Record & Predict

# -----------------------

def record_and_predict(duration=3, fs=22050):

    print("Recording...")

    recording = sd.rec(int(duration * fs), samplerate=fs, channels=1)

    sd.wait()

    print("Recording complete. Saving as temp.wav...")

    # librosa.output.write_wav was removed in librosa 0.8; write via soundfile
    sf.write("temp.wav", recording.flatten(), fs)


    with open("emotion_model.pkl", "rb") as f:

        model, encoder, scaler = pickle.load(f)

    

    features = extract_features("temp.wav").reshape(1, -1)

    features = scaler.transform(features)

    pred = model.predict(features)[0]

    probas = model.predict_proba(features)[0]

    

    emotion = encoder.inverse_transform([pred])[0]

    print(f"Detected Emotion: {emotion}")

    

    # Plot probabilities

    plt.bar(encoder.classes_, probas)

    plt.title("Emotion Prediction Confidence")

    plt.show()


# -----------------------

# MAIN

# -----------------------

if __name__ == "__main__":

    if not os.path.exists("emotion_model.pkl"):

        print("Training model...")

        train_model()

    

    record_and_predict()