Dynamic QR Code Generator

Project Structure

dynamic_qr/

├── app.py

├── templates/

│   ├── index.html

│   ├── qr_display.html

│   └── update.html

└── qr_data.db



 app.py

from flask import Flask, render_template, request, redirect, url_for

import qrcode

import sqlite3

import io, base64


app = Flask(__name__)

DB = 'qr_data.db'


# --- Initialize Database ---

def init_db():
    """Create the qr_links table on first run (no-op when it already exists)."""
    conn = sqlite3.connect(DB)
    try:
        # code is the short identifier embedded in the QR URL; UNIQUE lets
        # INSERT OR REPLACE refresh an existing mapping.
        conn.execute('''CREATE TABLE IF NOT EXISTS qr_links (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    code TEXT UNIQUE,
                    target_url TEXT
                )''')
        conn.commit()
    finally:
        conn.close()


init_db()


# --- Generate Dynamic QR ---

@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page.

    On POST: derive a short code for the submitted link, store (or refresh)
    the mapping, and render a QR image that points at our /r/<code> redirect.
    On GET: show the input form.
    """
    if request.method == 'POST':
        import hashlib  # local import: stable digest across restarts

        link = request.form['link']
        # The original used str(hash(link))[-6:], but built-in hash() is
        # salted per interpreter run (PYTHONHASHSEED), so the same link got a
        # different code after every restart — breaking previously printed
        # QR codes.  An md5 digest is deterministic.
        code = hashlib.md5(link.encode('utf-8')).hexdigest()[:6]
        short_url = request.host_url + "r/" + code

        conn = sqlite3.connect(DB)
        try:
            conn.execute(
                "INSERT OR REPLACE INTO qr_links (code, target_url) VALUES (?,?)",
                (code, link),
            )
            conn.commit()
        finally:
            conn.close()

        # Render the QR as an inline base64 PNG (nothing written to disk).
        img = qrcode.make(short_url)
        buf = io.BytesIO()
        img.save(buf, format='PNG')
        qr_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

        return render_template('qr_display.html', qr_img=qr_base64, short_url=short_url)
    return render_template('index.html')



# --- Redirect Handler ---

@app.route('/r/<code>')
def redirect_qr(code):
    """Resolve a short code and redirect the visitor to its stored target URL."""
    conn = sqlite3.connect(DB)
    try:
        row = conn.execute(
            "SELECT target_url FROM qr_links WHERE code=?", (code,)
        ).fetchone()
    finally:
        conn.close()
    if row is None:
        return "QR link not found!", 404
    return redirect(row[0])



# --- Update Target URL ---

@app.route('/update', methods=['GET', 'POST'])
def update_qr():
    """Form to repoint an existing short code at a new destination URL.

    NOTE(review): there is no authentication — anyone who knows a code can
    repoint it.  Acceptable for a demo; add an owner token for real use.
    """
    if request.method == 'POST':
        code = request.form['code']
        new_link = request.form['new_link']
        conn = sqlite3.connect(DB)
        try:
            cur = conn.execute(
                "UPDATE qr_links SET target_url=? WHERE code=?", (new_link, code)
            )
            conn.commit()
            updated = cur.rowcount
        finally:
            conn.close()
        # The original always reported success even when the UPDATE matched
        # no row (unknown code) and silently did nothing.
        if updated == 0:
            return "QR code not found!", 404
        return " Updated successfully!"
    return render_template('update.html')



if __name__ == "__main__":
    # debug=True enables the auto-reloader and the Werkzeug debugger —
    # development only; never expose this in production.
    app.run(debug=True)

templates/index.html

<!DOCTYPE html>
<!-- Landing page: POSTs the destination URL back to "/" (Flask index route),
     which stores it and renders qr_display.html with the generated QR. -->
<html>
<head>
    <title>Dynamic QR Generator</title>
</head>
<body>
    <h2> Generate Dynamic QR Code</h2>
    <form method="POST">
        <!-- "link" is read by index() via request.form['link'] -->
        <input type="text" name="link" placeholder="Enter destination URL" required>
        <button type="submit">Generate QR</button>
    </form>
    <br>
    <a href="/update">Update existing QR link</a>
</body>
</html>

templates/qr_display.html

<!DOCTYPE html>
<!-- Rendered by index() after generating a code: qr_img is a base64-encoded
     PNG, short_url is the /r/<code> redirect link embedded in the QR. -->
<html>
<head><title>QR Generated</title></head>
<body>
    <h2> Your Dynamic QR Code</h2>
    <img src="data:image/png;base64,{{ qr_img }}" alt="QR Code"><br>
    <p>Share this link: <b>{{ short_url }}</b></p>
    <a href="/">Generate another</a>
</body>
</html>
templates/update.html

<!DOCTYPE html>
<!-- POSTs "code" (the short code) and "new_link" to /update, handled by
     update_qr(). -->
<html>
<head><title>Update QR Link</title></head>
<body>
    <h2>Update QR Destination</h2>
    <form method="POST">
        <input type="text" name="code" placeholder="Enter QR code ID" required><br><br>
        <input type="text" name="new_link" placeholder="Enter new destination URL" required><br><br>
        <button type="submit">Update</button>
    </form>
</body>
</html>

Smart Resume Gap Detector

pip install spacy pandas dateparser

python -m spacy download en_core_web_sm


import re

import spacy

import pandas as pd

import dateparser

from datetime import datetime

from pathlib import Path


# Optional PDF reader (only if you want to support .pdf)

try:

    import fitz  # PyMuPDF

except ImportError:

    fitz = None


nlp = spacy.load("en_core_web_sm")


# ----------------------------

# Skill upgrade recommendations

# ----------------------------

# Maps a keyword (matched as a lowercase substring of the most recent job
# title in suggest_skills) to a list of recommended upskilling areas.
# "student" doubles as the fallback when no job titles were detected.
SKILL_UPGRADE_MAP = {
    "developer": ["AI/ML fundamentals", "Cloud platforms (AWS, Azure, GCP)", "DevOps basics"],
    "data": ["Data visualization (Power BI, Tableau)", "SQL optimization", "Machine learning pipelines"],
    "designer": ["Figma advanced", "UI motion design", "UX research"],
    "manager": ["Agile certification (Scrum)", "People analytics", "Data-driven decision-making"],
    "analyst": ["Data storytelling", "Python for data", "Business intelligence tools"],
    "tester": ["Automation (Selenium, Cypress)", "Performance testing", "API testing"],
    "student": ["Internships", "Portfolio projects", "Personal GitHub projects"],
}



# ----------------------------

# Helper functions

# ----------------------------


def extract_text(file_path):
    """Extract raw text from a .txt or .pdf resume.

    Args:
        file_path: path to the resume file.
    Returns:
        The file's text content (pages joined with spaces for PDFs).
    Raises:
        FileNotFoundError: the path does not exist.
        ValueError: unsupported extension, or .pdf without PyMuPDF installed.
    """
    p = Path(file_path)
    if not p.exists():
        raise FileNotFoundError(p)
    suffix = p.suffix.lower()
    if suffix == ".txt":
        # read_text opens and closes the file; the original left the handle
        # open (open(...).read() with no close).
        return p.read_text(encoding="utf-8")
    if suffix == ".pdf" and fitz:
        # close the document when done (the original leaked it)
        with fitz.open(file_path) as doc:
            return " ".join(page.get_text("text") for page in doc)
    raise ValueError("Please provide a .txt or .pdf file")



def extract_date_ranges(text):
    """Locate date ranges in resume text (e.g. 'Jan 2018 - Mar 2020',
    '2015–2017', 'July 2019 to Present') and return them as a list of
    (start_date, end_date) date tuples.  Unparseable ranges are skipped."""
    # Left side: month-name + year or bare year; right side also allows 'Present'.
    range_re = r"([A-Za-z]{3,9}\s*\d{4}|\d{4})\s*(?:-|to|–|—)\s*(Present|[A-Za-z]{3,9}\s*\d{4}|\d{4})"
    pairs = []
    for raw_start, raw_end in re.findall(range_re, text, flags=re.IGNORECASE):
        begin = dateparser.parse(raw_start)
        if re.search("present", raw_end, re.I):
            # open-ended position: treat "Present" as today
            finish = datetime.now()
        else:
            finish = dateparser.parse(raw_end)
        if begin and finish:
            pairs.append((begin.date(), finish.date()))
    return pairs



def detect_jobs(text):
    """Extract likely job titles from resume text.

    Runs the spaCy NER pass and keeps entities whose text contains a common
    job-title keyword; ORG / WORK_OF_ART entities are skipped since they are
    usually employers or project names rather than titles.

    Returns:
        De-duplicated job titles in first-seen order.
    """
    doc = nlp(text)
    title_re = re.compile(
        r"(developer|engineer|manager|designer|analyst|intern|tester|consultant|officer)",
        re.I,
    )
    job_titles = []
    for ent in doc.ents:
        if ent.label_ in ("ORG", "WORK_OF_ART"):
            continue
        if title_re.search(ent.text):
            job_titles.append(ent.text.strip())
    # The original used list(set(...)), whose order varies between runs
    # (hash randomization), making suggest_skills() — which keys off the
    # *last* title — nondeterministic.  dict.fromkeys preserves order.
    return list(dict.fromkeys(job_titles))



def calculate_gaps(date_pairs):
    """Find employment gaps between work-period date ranges.

    A gap is reported when more than 60 days separate the latest end date
    seen so far from the next range's start.  Tracking the running maximum
    end date — rather than only the immediately preceding range's end, as
    the original did — avoids false gaps when a long earlier job overlaps
    later, shorter ones.

    Args:
        date_pairs: list of (start_date, end_date) tuples.
    Returns:
        List of dicts with gap_start, gap_end and gap_months
        (approximate, using 30.4 days/month).
    """
    if not date_pairs:
        return []
    ordered = sorted(date_pairs, key=lambda pair: pair[0])
    gaps = []
    latest_end = ordered[0][1]
    for start, end in ordered[1:]:
        gap_days = (start - latest_end).days
        if gap_days > 60:  # > ~2 months counts as a gap
            gaps.append({
                "gap_start": latest_end,
                "gap_end": start,
                "gap_months": round(gap_days / 30.4, 1)
            })
        latest_end = max(latest_end, end)
    return gaps



def suggest_skills(jobs):
    """Return skill-upgrade suggestions keyed off the most recent job title;
    falls back to the 'student' list when no titles were detected, and to a
    generic list when no keyword matches."""
    if not jobs:
        return SKILL_UPGRADE_MAP["student"]
    latest = jobs[-1].lower()
    match = next(
        (recs for key, recs in SKILL_UPGRADE_MAP.items() if key in latest),
        None,
    )
    if match is not None:
        return match
    return ["Explore AI basics", "Cloud fundamentals", "Soft skill enhancement"]



# ----------------------------

# Main pipeline

# ----------------------------


def analyze_resume(file_path):
    """Run the full pipeline on one resume and print a human-readable report.

    Steps: extract text, find work-period date ranges, detect job titles,
    compute employment gaps, and suggest skill upgrades.  Returns the
    structured results as a dict for programmatic callers.
    """
    text = extract_text(file_path)

    date_pairs = extract_date_ranges(text)
    jobs = detect_jobs(text)
    gaps = calculate_gaps(date_pairs)
    skill_recs = suggest_skills(jobs)

    print("\n Analyzing Resume:", file_path)
    print("=" * 60)
    print(f" Detected job titles: {', '.join(jobs) if jobs else 'None found'}")
    print(f" Work periods found: {len(date_pairs)}")

    if gaps:
        print("\n Career Gaps Detected:")
        for g in gaps:
            print(f"   - {g['gap_start']} → {g['gap_end']} ({g['gap_months']} months)")
    else:
        print("\n No significant gaps detected.")

    print("\n Skill Upgrade Suggestions:")
    for s in skill_recs:
        print("   •", s)

    # Structured result mirroring the printed report (the CLI only prints).
    result = {
        "jobs": jobs,
        "dates": date_pairs,
        "gaps": gaps,
        "suggestions": skill_recs
    }
    return result



# ----------------------------

# Run Example

# ----------------------------

if __name__ == "__main__":
    # CLI entry point: `python script.py resume.txt`
    # (.pdf is also accepted when PyMuPDF is installed)
    import argparse
    parser = argparse.ArgumentParser(description="Smart Resume Gap Detector")
    parser.add_argument("resume_file", help="Path to resume (.txt or .pdf)")
    args = parser.parse_args()

    analyze_resume(args.resume_file)


AI Whiteboard Digitizer

#!/usr/bin/env python3

"""

AI Whiteboard Digitizer (prototype)


Usage:

    python whiteboard_digitizer.py input_image.jpg


Outputs:

 - ocr_texts.txt          : OCR'd text lines (raw)

 - equations_latex.tex    : LaTeX for parseable math expressions

 - diagram.svg (or diagram.png) : vector-like rendering of detected lines/circles

 - several debug images in ./debug_*.png

"""


import sys

import os

import cv2

import numpy as np

from PIL import Image

import pytesseract

from sympy import sympify, latex

from sympy.core.sympify import SympifyError

import svgwrite

import matplotlib.pyplot as plt


# If on Windows and tesseract is not in PATH, set path here (uncomment and adjust)

# pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"


# ---------- Utilities ----------

def ensure_dir(path):
    """Create *path* (including parents) if missing; no-op when it exists.

    Uses exist_ok=True instead of the original exists()/makedirs() pair,
    which could race: another process creating the directory between the
    check and the call raised FileExistsError.
    """
    os.makedirs(path, exist_ok=True)


# Preprocess: grayscale, denoise, adaptive threshold, deskew

def preprocess_image(img_bgr, max_dim=1600):
    """Resize, denoise, binarize and deskew a whiteboard photo.

    Returns (gray, th): the deskewed grayscale image and its inverted
    adaptive-threshold binarization (ink pixels become white on black).
    """
    # Resize to manageable size, keep aspect
    h, w = img_bgr.shape[:2]
    scale = min(1.0, float(max_dim) / max(h, w))
    if scale != 1.0:
        img_bgr = cv2.resize(img_bgr, (int(w*scale), int(h*scale)), interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    # Denoise
    gray = cv2.fastNlMeansDenoising(gray, None, 10, 7, 21)
    # Bilateral to preserve edges
    gray = cv2.bilateralFilter(gray, 9, 75, 75)
    # Adaptive threshold (whiteboard: dark text on light background)
    th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY_INV, 25, 12)
    # Deskew based on largest text contours or Hough lines
    # NOTE(review): np.where yields (row, col) pairs that minAreaRect reads
    # as (x, y) points, so the rect is computed on transposed axes.  This is
    # the common deskew snippet and appears to work for small tilts —
    # confirm on strongly rotated input.
    coords = np.column_stack(np.where(th > 0))
    if coords.shape[0] > 0:
        angle = cv2.minAreaRect(coords)[-1]
        # Correction of angle
        # NOTE(review): minAreaRect's angle convention changed across OpenCV
        # releases — verify this branch against the installed version.
        if angle < -45:
            angle = -(90 + angle)
        else:
            angle = -angle
        # Rotate both outputs with the same matrix so they stay aligned
        (h2, w2) = gray.shape[:2]
        M = cv2.getRotationMatrix2D((w2//2, h2//2), angle, 1.0)
        gray = cv2.warpAffine(gray, M, (w2, h2), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
        th = cv2.warpAffine(th, M, (w2, h2), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
    return gray, th


# Find text boxes using MSER or connected components (we'll use morphological dilation + contours)

def detect_text_regions(thresh_img, min_area=200, debug_out=None):
    """Group thresholded glyphs into word/line bounding boxes.

    Dilates with a wide horizontal kernel so neighbouring letters merge,
    takes external contours, and keeps boxes passing the size filters.
    Returns (x, y, w, h) boxes sorted top-to-bottom, then left-to-right;
    optionally writes a visualization to debug_out.
    """
    join_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 5))
    merged = cv2.dilate(thresh_img, join_kernel, iterations=2)
    contours, _ = cv2.findContours(merged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    rects = [cv2.boundingRect(c) for c in contours]
    # drop tiny areas and slivers that are almost certainly noise
    boxes = [
        (x, y, w, h)
        for (x, y, w, h) in rects
        if w * h >= min_area and h >= 10 and w >= 20
    ]
    boxes.sort(key=lambda b: (b[1], b[0]))

    if debug_out is not None:
        vis = cv2.cvtColor(thresh_img, cv2.COLOR_GRAY2BGR)
        for (x, y, w, h) in boxes:
            cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imwrite(debug_out, vis)
    return boxes


# OCR each region (use appropriate psm)

def ocr_regions(gray_img, boxes, ocr_lang='eng'):
    """OCR each detected region of gray_img.

    Pads every (x, y, w, h) box by 4px (clamped to the image), runs
    Tesseract on the crop, and returns a list of dicts with the padded box
    as (x0, y0, x1, y1) and the stripped text.  Empty results are dropped.
    """
    img_h = gray_img.shape[0]
    img_w = gray_img.shape[1]
    pad = 4
    results = []
    for (x, y, w, h) in boxes:
        left = max(0, x - pad)
        top = max(0, y - pad)
        right = min(img_w, x + w + pad)
        bottom = min(img_h, y + h + pad)
        crop = gray_img[top:bottom, left:right]
        # pytesseract wants a PIL image
        pil_img = Image.fromarray(crop)
        # --psm 7: single text line, which suits one equation per box
        text = pytesseract.image_to_string(pil_img, lang=ocr_lang, config="--psm 7").strip()
        if text:
            results.append({'box': (left, top, right, bottom), 'text': text})
    return results


# Heuristic to check if a line looks like an equation/expression

def looks_like_equation(s):
    """Heuristic: does the OCR'd line *s* look like a math expression?

    True when it contains an operator/equals sign, a known math function
    name, or a digit adjacent to a letter (e.g. '2x').
    """
    import re
    compact = s.replace(' ', '')
    # operator or equals sign anywhere
    if re.search(r'[=\+\-\*/\^]', compact):
        return True
    # common math function names as whole words
    if re.search(r'\b(sin|cos|tan|log|ln|lim|sqrt)\b', s.lower()):
        return True
    # digit next to a letter (2x, x2)
    return bool(re.search(r'\d+[a-zA-Z]|[a-zA-Z]\d+', s))


# Clean OCR text for sympy: replace common OCR artifacts

def clean_ocr_for_sympy(s):
    """Normalize an OCR'd line so sympy has a chance of parsing it.

    - maps unicode math/punctuation look-alikes to ASCII,
    - lowercases stray 'X' to 'x' (OCR often capitalizes variables),
    - turns 'O' into '0' only when adjacent to a digit — the original
      blanket-replaced every 'O' (its own comment flagged this as risky),
      corrupting any word containing the letter,
    - rewrites '^' to Python's '**',
    - strips non-printable characters.
    """
    import re
    repl = {
        '×': '*',
        'X': 'x',
        '—': '-',
        '−': '-',
        '–': '-',
        '÷': '/',
        '’': "'",
        '‘': "'",
        '“': '"',
        '”': '"',
    }
    out = s
    for k, v in repl.items():
        out = out.replace(k, v)
    # 'O' is only treated as a zero when it touches a digit (1O -> 10, O5 -> 05)
    out = re.sub(r'(?<=\d)O|O(?=\d)', '0', out)
    # superscript notation: x^2 -> x**2
    out = out.replace('^', '**')
    # strip control / non-printable characters
    # (the original also built an unused `allowed` whitelist string — removed)
    out = ''.join(ch for ch in out if ch.isprintable())
    return out.strip()


# Try to parse with sympy and produce latex

def parse_equation_to_latex(s):
    """Try to convert an OCR'd line to LaTeX via sympy.

    Lines containing '=' are treated as equations (split on the first '=');
    everything else is parsed as a bare expression.

    Returns:
        (latex_string, None) on success, or (None, error_message) when
        sympy cannot parse the cleaned text.
    """
    s_clean = clean_ocr_for_sympy(s)
    try:
        if '=' in s_clean:
            left, right = s_clean.split('=', 1)
            latex_str = latex(sympify(left)) + " = " + latex(sympify(right))
        else:
            latex_str = latex(sympify(s_clean))
        # The original also computed `eq = eleft - eright`, which was never
        # used and could itself raise on non-algebraic operands — removed.
        return latex_str, None
    except SympifyError as e:
        return None, f"SympifyError: {e}"
    except Exception as e:
        # sympify can raise other errors (e.g. TypeError) on garbage input
        return None, str(e)


# Detect simple geometric primitives (lines via Hough, circles via HoughCircles)

def detect_shapes(gray_img, debug_prefix=None):
    """Detect straight lines and circles in the grayscale image.

    Returns (line_list, circles): segments as (x1, y1, x2, y2) int tuples,
    and circles as (cx, cy, r) int tuples — or None when none were found or
    HoughCircles failed.  When debug_prefix is given, writes a
    "<prefix>_shapes.png" visualization.
    """
    # Use edge detection
    edges = cv2.Canny(gray_img, 50, 150, apertureSize=3)
    # Probabilistic Hough: segments of >= 50px, bridging gaps up to 10px
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=80, minLineLength=50, maxLineGap=10)
    line_list = []
    if lines is not None:
        for l in lines:
            x1,y1,x2,y2 = l[0]
            line_list.append((int(x1),int(y1),int(x2),int(y2)))
    # Hough circles (run on the grayscale image, not the edge map)
    circles = None
    try:
        circ = cv2.HoughCircles(gray_img, cv2.HOUGH_GRADIENT, dp=1.2, minDist=30,
                                param1=100, param2=30, minRadius=8, maxRadius=200)
        if circ is not None:
            circ = np.round(circ[0, :]).astype("int")
            circles = [(int(x),int(y),int(r)) for (x,y,r) in circ]
    except Exception:
        # best-effort: treat any HoughCircles failure as "no circles"
        circles = None
    # Save debug
    if debug_prefix:
        vis = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)
        for (x1,y1,x2,y2) in line_list:
            cv2.line(vis, (x1,y1), (x2,y2), (0,255,0), 2)
        if circles:
            for (x,y,r) in circles:
                cv2.circle(vis, (x,y), r, (0,0,255), 2)
        cv2.imwrite(f"{debug_prefix}_shapes.png", vis)
    return line_list, circles


# Render vector-style diagram to SVG using svgwrite or Matplotlib

def render_vector_diagram(svg_path, image_size, lines, circles, boxes=None):
    """Write detected primitives to an SVG file.

    Draws a white background, optional light-gray rectangles for text
    regions (boxes as (x0, y0, x1, y1)), near-black detected lines, and
    outlined circles, then saves to svg_path.
    """
    width, height = image_size
    drawing = svgwrite.Drawing(svg_path, size=(width, height))
    drawing.add(drawing.rect(insert=(0, 0), size=(width, height), fill='white'))

    if boxes:
        for (bx0, by0, bx1, by1) in boxes:
            drawing.add(drawing.rect(insert=(bx0, by0),
                                     size=(bx1 - bx0, by1 - by0),
                                     fill='none', stroke='lightgray', stroke_width=1))

    for (ax, ay, bx, by) in lines:
        drawing.add(drawing.line(start=(ax, ay), end=(bx, by),
                                 stroke=svgwrite.rgb(10, 10, 16, '%'), stroke_width=2))

    if circles:
        for (cx, cy, radius) in circles:
            drawing.add(drawing.circle(center=(cx, cy), r=radius,
                                       stroke='black', fill='none', stroke_width=2))

    drawing.save()


# Main pipeline

def process_whiteboard_image(in_path, out_dir="wb_outputs"):
    """End-to-end pipeline: preprocess -> text boxes -> OCR -> LaTeX ->
    shape detection -> SVG and debug renders.

    Writes ocr_texts.txt, equations_latex.tex, diagram.svg and several
    debug_*.png files into out_dir; returns a summary dict.

    Raises:
        FileNotFoundError: in_path is missing or unreadable by OpenCV.
    """
    ensure_dir(out_dir)
    img_bgr = cv2.imread(in_path)
    if img_bgr is None:
        # cv2.imread returns None (no exception) for missing/unreadable files
        raise FileNotFoundError(in_path)
    gray, th = preprocess_image(img_bgr)
    debug_pre = os.path.join(out_dir, "debug_preprocess.png")
    cv2.imwrite(debug_pre, gray)
    cv2.imwrite(os.path.join(out_dir, "debug_thresh.png"), th)

    # detect regions (text lines) on the binarized image
    boxes = detect_text_regions(th, debug_out=os.path.join(out_dir, "debug_boxes.png"))
    # OCR each region on the deskewed grayscale image
    ocr_lines = ocr_regions(gray, boxes)
    # write raw OCR results, one line per detected region
    ocr_txt_file = os.path.join(out_dir, "ocr_texts.txt")
    with open(ocr_txt_file, "w", encoding='utf-8') as f:
        for item in ocr_lines:
            f.write(item['text'] + "\n")
    print(f"[+] OCR lines saved to {ocr_txt_file}")

    # Keep only math-looking lines and attempt sympy -> LaTeX conversion;
    # unparseable ones are kept with an UNPARSEABLE marker for manual review.
    eq_candidates = [it for it in ocr_lines if looks_like_equation(it['text'])]
    latex_results = []
    for it in eq_candidates:
        txt = it['text']
        latex_str, err = parse_equation_to_latex(txt)
        if latex_str:
            latex_results.append((txt, latex_str))
        else:
            latex_results.append((txt, f"UNPARSEABLE: {err}"))
    # write equations latex (original OCR text kept as a % comment per entry)
    eq_file = os.path.join(out_dir, "equations_latex.tex")
    with open(eq_file, "w", encoding='utf-8') as f:
        f.write("% Generated LaTeX (auto) — review and correct as needed\n")
        for orig, out in latex_results:
            f.write("% OCR: " + orig.replace("\n"," ") + "\n")
            f.write(out + "\n\n")
    print(f"[+] Equation LaTeX saved to {eq_file}")

    # shape detection (Hough lines/circles on the grayscale image)
    lines, circles = detect_shapes(gray, debug_prefix=os.path.join(out_dir, "debug"))
    svg_path = os.path.join(out_dir, "diagram.svg")
    # convert boxes from (x, y, w, h) to (x0, y0, x1, y1) corners for the SVG
    bboxes = [(x,y,x+w,y+h) for (x,y,w,h) in boxes]
    render_vector_diagram(svg_path, (gray.shape[1], gray.shape[0]), lines, circles, boxes=bboxes)
    print(f"[+] Diagram SVG saved to {svg_path}")

    # Raster overlay for quick inspection (drawn with OpenCV, not matplotlib,
    # despite the module importing pyplot): lines green, circles red, boxes blue
    overlay = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    for (x1,y1,x2,y2) in lines:
        cv2.line(overlay, (x1,y1), (x2,y2), (0,255,0), 2)
    if circles:
        for (x,y,r) in circles:
            cv2.circle(overlay, (x,y), r, (0,0,255), 2)
    for (x0,y0,x1,y1) in bboxes:
        cv2.rectangle(overlay, (x0,y0), (x1,y1), (255,0,0), 1)
    cv2.imwrite(os.path.join(out_dir, "overlay_debug.png"), overlay)
    print(f"[+] Debug overlay saved to {os.path.join(out_dir, 'overlay_debug.png')}")
    return {
        "ocr_lines": ocr_lines,
        "equations": latex_results,
        "svg": svg_path,
        "debug": out_dir
    }


# --------- CLI ----------

if __name__ == "__main__":
    # Single positional argument: path to the whiteboard photo.
    if len(sys.argv) < 2:
        print("Usage: python whiteboard_digitizer.py input_image.jpg")
        sys.exit(1)
    inp = sys.argv[1]
    out = "wb_outputs"  # all artifacts are written under this directory
    res = process_whiteboard_image(inp, out_dir=out)
    print("Done. Outputs in:", out)


Virtual AI Travel Planner

import os

import json

import time

import requests

import pandas as pd

import streamlit as st

from datetime import date, timedelta


# -----------------------------

# Setup: API Keys from env vars

# -----------------------------

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")

GOOGLE_MAPS_API_KEY = os.getenv("GOOGLE_MAPS_API_KEY", "")


# -----------------------------

# Google Places helpers

# -----------------------------

PLACES_TEXT_SEARCH_URL = "https://maps.googleapis.com/maps/api/place/textsearch/json"


def places_text_search(query, key, max_results=20):
    """
    Wrapper around the Google Places 'Text Search' endpoint.

    Follows next_page_token pagination and stops once max_results results
    are collected, a page request fails, or no further token is returned.
    Returns [] immediately when no API key is configured.
    """
    if not key:
        return []

    params = {"query": query, "key": key}
    collected = []
    page_token = None
    while True:
        if page_token:
            params["pagetoken"] = page_token
            # Google rejects a next_page_token used too soon after issue
            time.sleep(2)

        response = requests.get(PLACES_TEXT_SEARCH_URL, params=params, timeout=30)
        if response.status_code != 200:
            break
        payload = response.json()
        collected.extend(payload.get("results", []))

        page_token = payload.get("next_page_token")
        if not page_token or len(collected) >= max_results:
            break

    return collected[:max_results]



def fetch_hotels(destination: str, max_results=20):
    """
    Look up hotels around *destination* via Places text search.

    Returns a DataFrame with name/rating/reviews/price_level/address/lat/lng,
    sorted by rating then review count (missing values last); empty when the
    API returned nothing.
    """
    places = places_text_search(f"hotels in {destination}", GOOGLE_MAPS_API_KEY, max_results=max_results)
    records = [
        {
            "name": p.get("name"),
            "rating": p.get("rating"),
            "reviews": p.get("user_ratings_total"),
            "price_level(0-4)": p.get("price_level"),
            "address": p.get("formatted_address"),
            "lat": p.get("geometry", {}).get("location", {}).get("lat"),
            "lng": p.get("geometry", {}).get("location", {}).get("lng"),
        }
        for p in places
    ]
    df = pd.DataFrame(records)
    if not df.empty:
        df = df.sort_values(by=["rating", "reviews"], ascending=[False, False], na_position="last")
    return df



def fetch_attractions(destination: str, max_results=20):
    """
    Look up top attractions / points of interest for *destination*.

    Returns a DataFrame sorted by rating then review count (missing values
    last); empty when the API returned nothing.
    """
    places = places_text_search(f"top attractions in {destination}", GOOGLE_MAPS_API_KEY, max_results=max_results)
    records = [
        {
            "name": p.get("name"),
            "category": ", ".join(p.get("types", [])),
            "rating": p.get("rating"),
            "reviews": p.get("user_ratings_total"),
            "address": p.get("formatted_address"),
            "lat": p.get("geometry", {}).get("location", {}).get("lat"),
            "lng": p.get("geometry", {}).get("location", {}).get("lng"),
        }
        for p in places
    ]
    df = pd.DataFrame(records)
    if not df.empty:
        df = df.sort_values(by=["rating", "reviews"], ascending=[False, False], na_position="last")
    return df


# -----------------------------

# Budget helper

# -----------------------------

def simple_budget_breakdown(total_budget, days, travelers=1):
    """
    Very coarse split of a trip budget (adjust as needed).

    Splits 45% stay / 30% food / 15% local transport / 10% activities and
    returns (per_day_dict, per_person_total).  Days and travelers are
    clamped to at least 1 to avoid division by zero.
    """
    budget = float(total_budget)
    n_days = max(1, int(days))
    n_travelers = max(1, int(travelers))

    # category -> fraction of the total budget
    shares = {
        "Stay": 0.45,
        "Food": 0.30,
        "Local Transport": 0.15,
        "Activities": 0.10,
    }
    per_day = {label: round(budget * frac / n_days, 2) for label, frac in shares.items()}
    per_day["Total/day"] = round(budget / n_days, 2)

    per_person = round(budget / n_travelers, 2)
    return per_day, per_person


# -----------------------------

# OpenAI itinerary helper

# -----------------------------

def generate_itinerary_with_openai(destination, start_date, days, interests, budget_hint, travelers):
    """
    Generate a day-by-day itinerary via the OpenAI Chat Completions API.

    Expects OPENAI_API_KEY in the environment.  Returns the model's text on
    success, or a human-readable error string on any failure (missing key,
    network error, non-2xx response, malformed payload) — callers display
    the return value either way.
    """
    if not OPENAI_API_KEY:
        return "OpenAI API key not set. Please set OPENAI_API_KEY in your environment."

    # Build system & user prompts
    sys_prompt = (
        "You are a helpful travel planner. Create practical, walkable day-by-day itineraries with morning, afternoon, and evening blocks, "
        "add short logistic hints, and keep it realistic for the location. Keep each day concise and bulleted."
    )
    user_prompt = (
        f"Destination: {destination}\n"
        f"Start date: {start_date}\n"
        f"Trip length: {days} days\n"
        f"Travelers: {travelers}\n"
        f"Interests: {', '.join(interests) if interests else 'general sightseeing'}\n"
        f"Budget guidance: {budget_hint}\n\n"
        "Please produce:\n"
        "1) A short overview paragraph (tone: friendly & practical)\n"
        "2) Day-by-day plan, each day with 3 bullet sections: Morning / Afternoon / Evening\n"
        "3) A compact list of neighborhood/area suggestions for dining\n"
        "4) 6 packing tips relevant to weather & activities\n"
    )

    # Minimal direct HTTP call to OpenAI Chat Completions.
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {OPENAI_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "gpt-4o-mini",   # pick a chat-capable model available to your account
        "messages": [
            {"role": "system", "content": sys_prompt},
            {"role": "user", "content": user_prompt}
        ],
        "temperature": 0.7,
        "max_tokens": 1200
    }

    try:
        # json=payload lets requests serialize and UTF-8-encode the body;
        # the original data=json.dumps(...) passed a plain str, which
        # requests may encode as latin-1 and fail on non-ASCII destinations.
        resp = requests.post(url, headers=headers, json=payload, timeout=60)
        resp.raise_for_status()
        data = resp.json()
        return data["choices"][0]["message"]["content"].strip()
    except Exception as e:
        return f"Failed to generate itinerary: {e}"


# -----------------------------

# Streamlit UI

# -----------------------------

st.set_page_config(page_title="Virtual AI Travel Planner", page_icon="🧭", layout="wide")

st.title("🧭 Virtual AI Travel Planner")
st.caption("Enter your destination & budget → get hotels, attractions, and an AI-built itinerary.\n\n*Educational demo. Always verify details before booking.*")

# Sidebar: report whether the process can see the API keys (never echo them).
# NOTE: several headers below contained mojibake (e.g. "šŸ”‘") from a bad
# encoding round-trip; restored to the intended emoji.
with st.sidebar:
    st.header("🔑 API Keys")
    st.write("Set these as environment variables before running:\n- `OPENAI_API_KEY`\n- `GOOGLE_MAPS_API_KEY`")
    st.write("Detected:")
    st.code(f"OPENAI_API_KEY set: {bool(OPENAI_API_KEY)}\nGOOGLE_MAPS_API_KEY set: {bool(GOOGLE_MAPS_API_KEY)}")

# Input form: destination/dates, budget/party, and fetch limits.
col1, col2, col3 = st.columns([1.2, 1, 1])

with col1:
    destination = st.text_input("Destination (city/country)", placeholder="e.g., Tokyo, Japan")
    start = st.date_input("Start date", value=date.today() + timedelta(days=14))
    days = st.number_input("Trip length (days)", min_value=1, max_value=21, value=5, step=1)

with col2:
    budget = st.number_input("Total budget (your currency)", min_value=0.0, value=1000.0, step=100.0, help="Rough trip budget total")
    travelers = st.number_input("Travelers", min_value=1, value=2, step=1)
    interests = st.multiselect("Interests", [
        "Food", "Museums", "Nature", "Architecture", "Shopping", "Nightlife", "Adventure", "History", "Beaches", "Hiking"
    ], default=["Food", "Museums"])

with col3:
    max_hotels = st.slider("Max hotels to fetch", 5, 40, 15)
    max_attractions = st.slider("Max attractions to fetch", 5, 40, 20)
    run = st.button("✨ Plan my trip")

if run:
    if not destination.strip():
        st.error("Please enter a destination.")
        st.stop()

    # Budget breakdown
    per_day, per_person = simple_budget_breakdown(budget, days, travelers)
    st.subheader("💸 Budget Guide")
    c1, c2 = st.columns(2)
    with c1:
        st.write("**Per-day guide** (rough):")
        st.table(pd.DataFrame([per_day]))
    with c2:
        st.metric("Per-person total", f"{per_person:.2f}")

    # Google Places lookups
    st.subheader("🏨 Hotels (Google Places)")
    hotels_df = fetch_hotels(destination, max_results=max_hotels)
    if hotels_df.empty:
        st.info("No hotel data (check your Google API key & billing).")
    else:
        st.dataframe(hotels_df, use_container_width=True)

    st.subheader("📍 Attractions / Things to do (Google Places)")
    attractions_df = fetch_attractions(destination, max_results=max_attractions)
    if attractions_df.empty:
        st.info("No attractions data (check your Google API key & billing).")
    else:
        st.dataframe(attractions_df, use_container_width=True)

    # AI itinerary
    st.subheader("🧠 AI Itinerary")
    budget_hint = f"Total budget approx {budget} for {travelers} travelers over {days} days. Per-day guide: {per_day}."
    itinerary = generate_itinerary_with_openai(destination, start, days, interests, budget_hint, travelers)
    st.write(itinerary)

    # CSV downloads (empty DataFrames still produce a valid header-only CSV)
    st.download_button(
        "⬇️ Download hotels CSV",
        data=hotels_df.to_csv(index=False).encode("utf-8"),
        file_name=f"{destination.replace(' ','_').lower()}_hotels.csv",
        mime="text/csv"
    )
    st.download_button(
        "⬇️ Download attractions CSV",
        data=attractions_df.to_csv(index=False).encode("utf-8"),
        file_name=f"{destination.replace(' ','_').lower()}_attractions.csv",
        mime="text/csv"
    )

    st.success("Done! Scroll up to view tables and itinerary. 🌍✈️")
else:
    st.info("Fill the form and click **Plan my trip**.")


Fake News Image Detector

import tkinter as tk

from tkinter import filedialog, messagebox

from PIL import Image, ImageTk

import exifread

import requests

import io


# -----------------------

# Metadata Extraction

# -----------------------

def extract_metadata(image_path):
    """Read EXIF tags from the file at *image_path* as a {tag: str} dict."""
    with open(image_path, 'rb') as fh:
        tags = exifread.process_file(fh)
    return {name: str(value) for name, value in tags.items()}


# -----------------------

# Reverse Search (TinEye / Google)

# -----------------------

def reverse_search(image_path):
    """Best-effort 'reverse search' hint for the uploaded image.

    A real implementation would use the TinEye or Google Custom Search
    APIs.  This demo uploads the image to imgbb and returns the hosted URL
    (requires replacing the placeholder API key).
    """
    try:
        with open(image_path, 'rb') as fh:
            response = requests.post(
                "https://api.imgbb.com/1/upload",
                files={"file": fh},
                params={"key": "YOUR_IMGBB_API_KEY"},
            )
            if response.status_code == 200:
                return response.json()["data"]["url"]
    except Exception as e:
        return f"Reverse search not available: {e}"
    return "Reverse search failed."


# -----------------------

# GUI Functions

# -----------------------

def open_image():
    """Let the user pick an image, preview it, and fill the text panel with
    EXIF metadata plus a reverse-search hint."""
    file_path = filedialog.askopenfilename(filetypes=[("Image Files", "*.jpg;*.jpeg;*.png")])
    if not file_path:
        return

    # Thumbnail preview; keep a reference on the label so Tk doesn't
    # garbage-collect the PhotoImage.
    img = Image.open(file_path)
    img.thumbnail((250, 250))
    img_tk = ImageTk.PhotoImage(img)
    lbl_image.config(image=img_tk)
    lbl_image.image = img_tk

    # EXIF metadata dump
    metadata = extract_metadata(file_path)
    txt_metadata.delete(1.0, tk.END)
    if metadata:
        for k, v in metadata.items():
            txt_metadata.insert(tk.END, f"{k}: {v}\n")
    else:
        txt_metadata.insert(tk.END, "No metadata found.\n")

    # Reverse search (optional).  The original hint string contained
    # mojibake ("šŸ”") from a bad encoding round-trip; restored to 🔍.
    link = reverse_search(file_path)
    txt_metadata.insert(tk.END, f"\n🔍 Reverse Search Hint: {link}\n")


# -----------------------

# Main App

# -----------------------

root = tk.Tk()
# Window/button labels contained mojibake ("šŸ“°", "šŸ“¤") from a bad
# encoding round-trip; restored to the intended 📰 / 📤 emoji.
root.title("📰 Fake News Image Detector")
root.geometry("600x500")

frame_top = tk.Frame(root)
frame_top.pack(pady=10)

btn_upload = tk.Button(frame_top, text="📤 Upload Image", command=open_image, font=("Arial", 12, "bold"))
btn_upload.pack()

# Preview area populated by open_image()
lbl_image = tk.Label(root)
lbl_image.pack(pady=10)

lbl_meta = tk.Label(root, text="Image Metadata & Clues:", font=("Arial", 12, "bold"))
lbl_meta.pack()

# Text panel that open_image() fills with EXIF tags and the reverse-search hint
txt_metadata = tk.Text(root, wrap=tk.WORD, width=70, height=15)
txt_metadata.pack(pady=10)

root.mainloop()