Blog Pages

AI Chat Summarizer for WhatsApp

import re

import pandas as pd

import matplotlib.pyplot as plt

from textblob import TextBlob

from collections import Counter

from datetime import datetime

import os


# ========== CONFIG ==========

CHAT_FILE = "chat.txt"

PLOTS_FOLDER = "chat_analysis_plots"

os.makedirs(PLOTS_FOLDER, exist_ok=True)


# ========== 1. Parse WhatsApp Chat ==========

def parse_chat(file_path):

    with open(file_path, 'r', encoding='utf-8') as f:

        raw_text = f.readlines()


    messages = []

    # Matches both 12-hour ("9:15 PM") and 24-hour ("21:15") WhatsApp exports
    pattern = r'^(\d{1,2}/\d{1,2}/\d{2,4}), (\d{1,2}:\d{2})(?:\s?(am|pm|AM|PM))? - ([^:]+): (.*)'


    for line in raw_text:

        match = re.match(pattern, line)

        if match:

            date, time, am_pm, sender, message = match.groups()

            # Pick the format that matches this line: 2- vs 4-digit year, 12- vs 24-hour clock
            date_fmt = "%d/%m/%y" if len(date.split("/")[-1]) == 2 else "%d/%m/%Y"
            time_fmt = "%I:%M %p" if am_pm else "%H:%M"
            dt = datetime.strptime(f"{date} {time}" + (f" {am_pm}" if am_pm else ""), f"{date_fmt} {time_fmt}")

            messages.append([dt, sender.strip(), message.strip()])

    

    df = pd.DataFrame(messages, columns=["datetime", "sender", "message"])

    return df


# ========== 2. Sentiment & Stats ==========

def analyze_sentiments(df):

    df['polarity'] = df['message'].apply(lambda x: TextBlob(x).sentiment.polarity)

    df['sentiment'] = df['polarity'].apply(lambda x: 'positive' if x > 0.1 else 'negative' if x < -0.1 else 'neutral')

    return df


def top_senders(df, top_n=5):

    return df['sender'].value_counts().head(top_n)


# ========== 3. Plotting Functions ==========

def plot_message_frequency(df):

    df['date'] = df['datetime'].dt.date

    daily_counts = df.groupby('date').size()


    plt.figure(figsize=(12, 5))

    daily_counts.plot(kind='line', color='teal')

    plt.title("Messages Per Day")

    plt.xlabel("Date")

    plt.ylabel("Number of Messages")

    plt.tight_layout()

    plt.savefig(f"{PLOTS_FOLDER}/messages_per_day.png")

    plt.close()


def plot_sender_activity(df):

    sender_counts = df['sender'].value_counts()

    sender_counts.plot(kind='bar', figsize=(10,5), color='orchid')

    plt.title("Messages by Sender")

    plt.ylabel("Message Count")

    plt.tight_layout()

    plt.savefig(f"{PLOTS_FOLDER}/messages_by_sender.png")

    plt.close()


def plot_sentiment_distribution(df):

    sentiment_counts = df['sentiment'].value_counts()

    sentiment_counts.plot(kind='pie', autopct='%1.1f%%', figsize=(6,6), colors=['lightgreen', 'lightcoral', 'lightgrey'])

    plt.title("Sentiment Distribution")

    plt.tight_layout()

    plt.savefig(f"{PLOTS_FOLDER}/sentiment_distribution.png")

    plt.close()


# ========== 4. Generate Summary ==========

def generate_summary(df):

    summary = []

    summary.append(f"Total messages: {len(df)}")

    summary.append(f"Total participants: {df['sender'].nunique()}")

    summary.append("Top 5 active senders:")

    summary.extend(top_senders(df).to_string().split('\n'))


    sentiment_split = df['sentiment'].value_counts(normalize=True) * 100

    summary.append("\nSentiment Breakdown:")

    summary.extend(sentiment_split.round(2).to_string().split('\n'))


    with open("summary_output.txt", "w") as f:

        f.write("\n".join(summary))

    

    return "\n".join(summary)


# ========== MAIN ==========

if __name__ == "__main__":

    print("๐Ÿ“ฅ Parsing chat...")

    df = parse_chat(CHAT_FILE)


    print("๐Ÿง  Analyzing sentiments...")

    df = analyze_sentiments(df)


    print("๐Ÿ“Š Generating plots...")

    plot_message_frequency(df)

    plot_sender_activity(df)

    plot_sentiment_distribution(df)


    print("๐Ÿ“ Writing summary...")

    summary_text = generate_summary(df)

    print(summary_text)


    print("\n✅ Done! Plots saved to 'chat_analysis_plots' and summary to 'summary_output.txt'")


Auto Meeting Notes Generator

import os

import re

import pandas as pd

import whisper

from datetime import datetime


# Optional: For GPT-4 summarization

import openai

from dotenv import load_dotenv


load_dotenv()

openai.api_key = os.getenv("OPENAI_API_KEY")


# ========== CONFIG ==========

AUDIO_FOLDER = "audio"

TRANSCRIPT_FOLDER = "transcriptions"

NOTES_FOLDER = "notes_output"


# ========== SETUP ==========

os.makedirs(AUDIO_FOLDER, exist_ok=True)
os.makedirs(TRANSCRIPT_FOLDER, exist_ok=True)

os.makedirs(NOTES_FOLDER, exist_ok=True)


# ========== 1. Transcribe Audio ==========

def transcribe_audio(file_path, model_name="base"):

    model = whisper.load_model(model_name)

    result = model.transcribe(file_path)

    

    filename = os.path.basename(file_path).split('.')[0]

    output_path = os.path.join(TRANSCRIPT_FOLDER, f"{filename}.txt")

    

    with open(output_path, "w", encoding="utf-8") as f:

        f.write(result["text"])

    

    return result["text"]


# ========== 2. Extract Action Items ==========

def extract_action_items(text):

    bullet_pattern = r"(?:-|\*|\d\.)\s*(.+)"

    action_keywords = ["should", "need to", "must", "let's", "we will", "assign", "follow up", "due"]


    actions = []

    for line in text.split('\n'):

        line = line.strip()

        if any(keyword in line.lower() for keyword in action_keywords):

            actions.append(line)


    # Fallback: try extracting bullets

    bullets = re.findall(bullet_pattern, text)

    for b in bullets:

        if any(k in b.lower() for k in action_keywords):

            actions.append(b)

    

    return list(set(actions))


# ========== 3. Summarize with GPT (Optional) ==========

def summarize_with_gpt(transcript_text):

    response = openai.ChatCompletion.create(

        model="gpt-4-turbo",

        messages=[

            {"role": "system", "content": "You are an AI assistant that summarizes meeting transcripts."},

            {"role": "user", "content": f"Summarize this meeting:\n\n{transcript_text}"}

        ]

    )

    return response['choices'][0]['message']['content']


# ========== 4. Save Final Notes ==========

def save_notes(transcript, actions, summary=None, filename="meeting_notes"):

    now = datetime.now().strftime("%Y%m%d_%H%M")

    csv_path = os.path.join(NOTES_FOLDER, f"{filename}_{now}.csv")


    df = pd.DataFrame({

        "Section": ["Transcript", "Action Items", "Summary"],

        "Content": [transcript, "\n".join(actions), summary or "Not generated"]

    })

    df.to_csv(csv_path, index=False)

    print(f"[✔] Notes saved to {csv_path}")


# ========== MAIN ==========

def process_meeting(file_path, use_gpt=False):

    print(f"๐Ÿ”Š Transcribing: {file_path}")

    transcript = transcribe_audio(file_path)


    print("✅ Extracting action items...")

    actions = extract_action_items(transcript)


    summary = None

    if use_gpt:

        print("๐Ÿค– Summarizing with GPT...")

        summary = summarize_with_gpt(transcript)


    file_name = os.path.basename(file_path).split('.')[0]

    save_notes(transcript, actions, summary, file_name)



# ========== RUN ==========

if __name__ == "__main__":

    audio_files = [f for f in os.listdir(AUDIO_FOLDER) if f.endswith(('.mp3', '.wav'))]


    if not audio_files:

        print("⚠️ No audio files found in /audio folder.")

    else:

        for file in audio_files:

            process_meeting(os.path.join(AUDIO_FOLDER, file), use_gpt=True)
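
Note that summarize_with_gpt() above uses the legacy pre-1.0 openai interface. If you are on openai>=1.0, a roughly equivalent sketch with the newer client looks like this (same prompt; pick whatever model your key has access to):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def summarize_with_gpt_v1(transcript_text, model="gpt-4-turbo"):
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are an AI assistant that summarizes meeting transcripts."},
            {"role": "user", "content": f"Summarize this meeting:\n\n{transcript_text}"},
        ],
    )
    return response.choices[0].message.content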


Retro Arcade Game Emulator Launcher

import tkinter as tk

from tkinter import messagebox, filedialog

import os

import subprocess

import sqlite3


# ===== Emulator Configuration =====

EMULATOR_PATH = "emulator.exe"  # Update with actual emulator exe

ROMS_FOLDER = "roms"


# ===== Database Setup =====

def init_db():

    conn = sqlite3.connect("user_data.db")

    cur = conn.cursor()

    cur.execute('''

        CREATE TABLE IF NOT EXISTS favorites (

            id INTEGER PRIMARY KEY,

            rom TEXT UNIQUE

        )

    ''')

    conn.commit()

    conn.close()


def add_favorite(rom):

    conn = sqlite3.connect("user_data.db")

    cur = conn.cursor()

    try:

        cur.execute("INSERT INTO favorites (rom) VALUES (?)", (rom,))

        conn.commit()

    except sqlite3.IntegrityError:

        pass

    conn.close()


def remove_favorite(rom):

    conn = sqlite3.connect("user_data.db")

    cur = conn.cursor()

    cur.execute("DELETE FROM favorites WHERE rom=?", (rom,))

    conn.commit()

    conn.close()


def get_favorites():

    conn = sqlite3.connect("user_data.db")

    cur = conn.cursor()

    cur.execute("SELECT rom FROM favorites")

    favs = [row[0] for row in cur.fetchall()]

    conn.close()

    return favs


# ===== Launcher GUI =====

class GameLauncher:

    def __init__(self, root):

        self.root = root

        self.root.title("๐ŸŽฎ Retro Arcade Launcher")

        self.root.geometry("500x500")


        self.favorites = get_favorites()


        self.label = tk.Label(root, text="Available Games", font=("Arial", 14, "bold"))

        self.label.pack(pady=10)


        self.listbox = tk.Listbox(root, width=50, height=20)

        self.populate_list()

        self.listbox.pack()


        btn_frame = tk.Frame(root)

        btn_frame.pack(pady=10)


        tk.Button(btn_frame, text="▶️ Play", command=self.launch_game).grid(row=0, column=0, padx=5)

        tk.Button(btn_frame, text="⭐ Add Fav", command=self.add_to_favorites).grid(row=0, column=1, padx=5)

        tk.Button(btn_frame, text="❌ Remove Fav", command=self.remove_from_favorites).grid(row=0, column=2, padx=5)

        tk.Button(btn_frame, text="๐Ÿ” Refresh", command=self.refresh).grid(row=0, column=3, padx=5)


    def populate_list(self):

        self.listbox.delete(0, tk.END)

        if not os.path.exists(ROMS_FOLDER):

            os.makedirs(ROMS_FOLDER)


        files = [f for f in os.listdir(ROMS_FOLDER) if f.endswith((".nes", ".gba"))]

        for f in files:

            label = f + (" ⭐" if f in self.favorites else "")

            self.listbox.insert(tk.END, label)


    def get_selected_rom(self):

        try:

            selected = self.listbox.get(tk.ACTIVE)

            return selected.replace(" ⭐", "")

        except:

            return None


    def launch_game(self):

        rom = self.get_selected_rom()

        if not rom:

            messagebox.showwarning("No Selection", "Please select a game.")

            return


        rom_path = os.path.join(ROMS_FOLDER, rom)

        if not os.path.exists(EMULATOR_PATH):

            messagebox.showerror("Emulator Not Found", "Update the emulator path in the code.")

            return


        subprocess.Popen([EMULATOR_PATH, rom_path])

        print(f"Launching {rom}...")


    def add_to_favorites(self):

        rom = self.get_selected_rom()

        if rom:

            add_favorite(rom)

            self.refresh()


    def remove_from_favorites(self):

        rom = self.get_selected_rom()

        if rom:

            remove_favorite(rom)

            self.refresh()


    def refresh(self):

        self.favorites = get_favorites()

        self.populate_list()


# === Main ===

if __name__ == "__main__":

    init_db()

    root = tk.Tk()

    app = GameLauncher(root)

    root.mainloop()
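
The launcher assumes a single EMULATOR_PATH for every ROM. If .nes and .gba files need different emulators, a small mapping like the one below could be dropped in (the paths are placeholders, not real binaries), and launch_game() would then call subprocess.Popen([emulator_for(rom), rom_path]):

EMULATORS = {
    ".nes": r"C:\emulators\nes_emulator.exe",   # placeholder path
    ".gba": r"C:\emulators\gba_emulator.exe",   # placeholder path
}

def emulator_for(rom):
    # Fall back to the global EMULATOR_PATH for unknown extensions
    _, ext = os.path.splitext(rom)
    return EMULATORS.get(ext.lower(), EMULATOR_PATH)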


Secure Diary with Face Unlock

Install Requirements

pip install face_recognition opencv-python cryptography

Capture Reference Face

# Optional: capture face from webcam and save as known_face.jpg
import cv2
cam = cv2.VideoCapture(0)
ret, frame = cam.read()
cv2.imwrite("known_face.jpg", frame)
cam.release()
print("Reference face saved.")

Secure Diary Code

import face_recognition
import cv2
import os
from cryptography.fernet import Fernet
from getpass import getpass
import base64

KEY_FILE = "secret.key"
ENC_FILE = "diary.enc"
KNOWN_FACE_FILE = "known_face.jpg"

# === Face Unlock ===
def verify_face():
    if not os.path.exists(KNOWN_FACE_FILE):
        print("Known face image not found.")
        return False

    known_image = face_recognition.load_image_file(KNOWN_FACE_FILE)
    known_encoding = face_recognition.face_encodings(known_image)[0]

    cam = cv2.VideoCapture(0)
    print("Looking for face...")
    ret, frame = cam.read()
    cam.release()

    try:
        unknown_encoding = face_recognition.face_encodings(frame)[0]
        result = face_recognition.compare_faces([known_encoding], unknown_encoding)[0]
        return result
    except IndexError:
        print("No face detected.")
        return False

# === Encryption Helpers ===
def generate_key(password):
    return base64.urlsafe_b64encode(password.encode().ljust(32)[:32])

def encrypt_diary(content, key):
    f = Fernet(key)
    with open(ENC_FILE, "wb") as file:
        file.write(f.encrypt(content.encode()))
    print("Diary encrypted & saved.")

def decrypt_diary(key):
    f = Fernet(key)
    with open(ENC_FILE, "rb") as file:
        data = file.read()
    return f.decrypt(data).decode()

# === Main Diary App ===
def diary_app():
    print("๐Ÿ” Secure Diary Access")

    if not verify_face():
        print("Face verification failed. Access denied.")
        return

    password = getpass("Enter your diary password: ")
    key = generate_key(password)

    if os.path.exists(ENC_FILE):
        try:
            decrypted = decrypt_diary(key)
            print("\n๐Ÿ“– Your Diary:\n", decrypted)
        except:
            print("Failed to decrypt. Wrong password?")
            return
    else:
        print("No diary found. Creating new one...")

    new_entry = input("\n✍️ Write new entry (append to diary):\n> ")
    combined = decrypted + "\n\n" + new_entry if 'decrypted' in locals() else new_entry
    encrypt_diary(combined, key)

if __name__ == "__main__":
    diary_app()
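
A note on generate_key(): padding the password to 32 bytes keeps the example short, but short passwords give low-entropy keys. A sketch of a stronger derivation with PBKDF2 from the same cryptography package (the File Locker CLI further down this page stores the salt the same way):

import base64
import os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

def generate_key_pbkdf2(password, salt):
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=390000)
    return base64.urlsafe_b64encode(kdf.derive(password.encode()))

# The salt must be saved next to (or inside) diary.enc, e.g. as its first 16 bytes,
# and reused when the diary is decrypted.
salt = os.urandom(16)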

Screen Time Tracker

import time

import pandas as pd

import matplotlib.pyplot as plt

from datetime import datetime

import win32gui  # Windows-only; use AppKit for Mac


LOG_FILE = "screen_time_log.csv"

TRACK_DURATION_MINUTES = 1  # Change as needed

INTERVAL_SECONDS = 5


def get_active_window_title():

    try:

        return win32gui.GetWindowText(win32gui.GetForegroundWindow())

    except:

        return "Unknown"


def track_screen_time(duration_minutes=1, interval=5):

    end_time = time.time() + (duration_minutes * 60)

    usage_log = []


    print("Tracking started... Press Ctrl+C to stop early.")

    while time.time() < end_time:

        window = get_active_window_title()

        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        usage_log.append((timestamp, window))

        time.sleep(interval)


    # Save to CSV

    df = pd.DataFrame(usage_log, columns=["Timestamp", "Window"])

    df.to_csv(LOG_FILE, index=False)

    print(f"Tracking complete. Data saved to {LOG_FILE}")

    return df


def generate_report(csv_file):

    df = pd.read_csv(csv_file)

    df["Window"] = df["Window"].fillna("Unknown")


    # Count frequency of window usage

    summary = df["Window"].value_counts().head(10)  # Top 10 apps/windows


    # Plot

    plt.figure(figsize=(10, 6))

    summary.plot(kind="bar", color="skyblue")

    plt.title("Most Used Windows/Apps")

    plt.xlabel("Window Title")

    plt.ylabel("Active Window Count")

    plt.xticks(rotation=45, ha="right")

    plt.tight_layout()

    plt.show()


if __name__ == "__main__":

    df = track_screen_time(TRACK_DURATION_MINUTES, INTERVAL_SECONDS)

    generate_report(LOG_FILE)
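
As the import comment notes, win32gui is Windows-only. On macOS, an untested sketch along these lines should give the frontmost application's name via pyobjc's AppKit bindings (install with pip install pyobjc):

from AppKit import NSWorkspace

def get_active_app_name_mac():
    app = NSWorkspace.sharedWorkspace().frontmostApplication()
    return app.localizedName() if app else "Unknown"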


News Authenticity Checker

import tkinter as tk

from tkinter import messagebox

from newspaper import Article

from sklearn.feature_extraction.text import TfidfVectorizer

import requests


NEWS_API_KEY = 'your_newsapi_key_here'  # Replace with your NewsAPI key


# ---------- News Verifier Logic ----------

def extract_keywords(text, top_n=10):

    vectorizer = TfidfVectorizer(stop_words='english')

    X = vectorizer.fit_transform([text])

    keywords = sorted(zip(vectorizer.get_feature_names_out(), X.toarray()[0]), key=lambda x: -x[1])

    return [k[0] for k in keywords[:top_n]]


def fetch_related_news(keywords):

    query = ' OR '.join(keywords[:3])  # Limit to top 3

    url = f'https://newsapi.org/v2/everything?q={query}&language=en&apiKey={NEWS_API_KEY}'

    res = requests.get(url)

    if res.status_code != 200:

        return []

    # 'description' can be None/missing in NewsAPI results, so fall back to empty strings
    return [(a.get('title') or '') + ' ' + (a.get('description') or '') for a in res.json().get('articles', [])]


def calculate_authenticity(article_keywords, related_news):

    related_text = ' '.join(related_news)

    matches = [kw for kw in article_keywords if kw.lower() in related_text.lower()]

    return int((len(matches) / len(article_keywords)) * 100) if article_keywords else 0


# ---------- GUI ----------

class NewsCheckerApp:

    def __init__(self, root):

        self.root = root

        self.root.title("๐Ÿ•ต️ News Authenticity Checker")


        tk.Label(root, text="Paste News Article URL:").pack()

        self.url_entry = tk.Entry(root, width=60)

        self.url_entry.pack(pady=5)


        tk.Button(root, text="๐Ÿ” Check Authenticity", command=self.check_news).pack(pady=10)


        self.result_label = tk.Label(root, text="", font=("Arial", 12, "bold"))

        self.result_label.pack(pady=20)


    def check_news(self):

        url = self.url_entry.get().strip()

        if not url:

            messagebox.showerror("Error", "Please enter a news URL.")

            return


        try:

            article = Article(url)

            article.download()

            article.parse()

            content = article.text


            article_keywords = extract_keywords(content)

            related_news = fetch_related_news(article_keywords)

            score = calculate_authenticity(article_keywords, related_news)


            result = f"✅ Authenticity Score: {score}%"

            if score < 40:

                result += "\n⚠️ This article may not be widely reported."

            else:

                result += "\n๐Ÿ‘ Seems consistent with other sources."


            self.result_label.config(text=result)


        except Exception as e:

            messagebox.showerror("Error", str(e))


# ---------- Run App ----------

if __name__ == "__main__":

    root = tk.Tk()

    app = NewsCheckerApp(root)

    root.mainloop()
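
As a quick illustration of the keyword step: with a single document, TF-IDF reduces to term frequency, so extract_keywords() mostly surfaces the most repeated non-stopwords. A made-up example:

sample = ("The council approved the transport budget. The budget funds electric buses, "
          "and the buses replace older diesel buses across the city.")
print(extract_keywords(sample, top_n=3))
# 'buses' and 'budget' rank first; the remaining ties depend on the vectorizer's ordering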


PDF Bill Splitter App

import tkinter as tk

from tkinter import filedialog, messagebox

import fitz  # PyMuPDF

import re

import sqlite3

import os


# ---------- Database Setup ----------

conn = sqlite3.connect('bill_splitter.db')

cursor = conn.cursor()

cursor.execute('''

CREATE TABLE IF NOT EXISTS bills (

    id INTEGER PRIMARY KEY AUTOINCREMENT,

    filename TEXT,

    total_amount REAL,

    per_person REAL,

    roommates TEXT

)

''')

conn.commit()


# ---------- PDF Total Extractor ----------

def extract_total_from_pdf(file_path):

    try:

        doc = fitz.open(file_path)

        text = ""

        for page in doc:

            text += page.get_text()

        doc.close()


        # Try finding the largest ₹/$/Rs. number

        amounts = re.findall(r'(?:₹|\$|Rs\.?)?\s*(\d[\d,]*(?:\.\d{1,2})?)', text)

        float_amounts = [float(a.replace(',', '')) for a in amounts]

        return max(float_amounts) if float_amounts else 0.0


    except Exception as e:

        messagebox.showerror("Error", f"Failed to extract total: {str(e)}")

        return 0.0


# ---------- GUI ----------

class BillSplitterApp:

    def __init__(self, root):

        self.root = root

        self.root.title("๐Ÿ“„ PDF Bill Splitter")

        self.filename = None


        # UI Layout

        tk.Button(root, text="๐Ÿ“‚ Upload PDF Bill", command=self.upload_pdf).pack(pady=10)


        self.total_var = tk.StringVar()

        tk.Label(root, text="๐Ÿ’ฐ Total Amount:").pack()

        tk.Entry(root, textvariable=self.total_var, state="readonly").pack(pady=5)


        self.roommates_entry = tk.Entry(root)

        self.roommates_entry.pack(pady=5)

        self.roommates_entry.insert(0, "Enter emails/names comma-separated")


        tk.Button(root, text="➗ Split Bill", command=self.split_bill).pack(pady=10)


    def upload_pdf(self):

        file_path = filedialog.askopenfilename(filetypes=[("PDF Files", "*.pdf")])

        if file_path:

            self.filename = os.path.basename(file_path)

            total = extract_total_from_pdf(file_path)

            self.total_var.set(f"{total:.2f}")


    def split_bill(self):

        total = self.total_var.get()

        roommates = self.roommates_entry.get().split(',')


        if not total or not roommates or len(roommates) < 1:

            messagebox.showerror("Error", "Please upload a PDF and enter roommates.")

            return


        try:

            total = float(total)

            per_person = total / len(roommates)

            roommates_clean = [r.strip() for r in roommates]


            # Save to DB

            cursor.execute('''

                INSERT INTO bills (filename, total_amount, per_person, roommates)

                VALUES (?, ?, ?, ?)

            ''', (self.filename, total, per_person, ", ".join(roommates_clean)))

            conn.commit()


            messagebox.showinfo("Success", f"Each roommate pays: ₹{per_person:.2f}")

            self.email_roommates(roommates_clean, per_person)


        except Exception as e:

            messagebox.showerror("Error", str(e))


    def email_roommates(self, roommates, amount):

        # Mock email reminder

        for r in roommates:

            print(f"[Email to {r}] Your share: ₹{amount:.2f}")


# ---------- Run ----------

if __name__ == "__main__":

    root = tk.Tk()

    app = BillSplitterApp(root)

    root.mainloop()
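
A quick sanity check of the amount pattern used in extract_total_from_pdf(), run against a made-up receipt string (illustrative only; real bills vary):

import re

sample_text = "Subtotal Rs. 1,250.00\nGST $45.50\nGrand Total 1,295.50"
found = re.findall(r'(?:₹|\$|Rs\.?)?\s*(\d[\d,]*(?:\.\d{1,2})?)', sample_text)
print(max(float(a.replace(',', '')) for a in found))   # 1295.5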


Mental Health Journal Analyzer

import streamlit as st

from textblob import TextBlob

import pandas as pd

import matplotlib.pyplot as plt

from datetime import datetime, timedelta

import os


# CSV to store journal data

DATA_FILE = "journal_data.csv"


# Function to analyze sentiment

def analyze_sentiment(text):

    blob = TextBlob(text)

    return blob.sentiment.polarity  # -1 to 1


# Load data

def load_data():

    if os.path.exists(DATA_FILE):

        return pd.read_csv(DATA_FILE, parse_dates=['date'])

    else:

        return pd.DataFrame(columns=["date", "entry", "sentiment"])


# Save data

def save_data(entry, sentiment):

    new_data = pd.DataFrame({

        "date": [datetime.now().date()],

        "entry": [entry],

        "sentiment": [sentiment]

    })

    data = load_data()

    data = pd.concat([data, new_data], ignore_index=True)

    data.to_csv(DATA_FILE, index=False)


# Weekly sentiment plot

def plot_sentiment(data):

    data = data.copy()
    data['date'] = pd.to_datetime(data['date'])  # normalize mixed date types before comparing
    recent = data[data['date'] >= (pd.Timestamp(datetime.now().date()) - timedelta(days=6))]

    daily_avg = recent.groupby('date')['sentiment'].mean().reset_index()


    plt.figure(figsize=(10, 4))

    plt.plot(daily_avg['date'], daily_avg['sentiment'], marker='o', linestyle='-')

    plt.title("๐Ÿง  Weekly Mental Wellness Trend")

    plt.xlabel("Date")

    plt.ylabel("Sentiment Score")

    plt.ylim(-1, 1)

    plt.axhline(0, color='gray', linestyle='--')

    plt.grid(True)

    st.pyplot(plt)


# ---------------- STREAMLIT APP ----------------


st.set_page_config(page_title="Mental Health Journal Analyzer", layout="centered")

st.title("๐Ÿง  Mental Health Journal Analyzer")

st.markdown("Write your daily mood journal and analyze your wellness trend.")


# Input

journal_entry = st.text_area("Write today's journal entry:", height=150)


if st.button("Analyze & Save"):

    if journal_entry.strip():

        sentiment = analyze_sentiment(journal_entry)

        save_data(journal_entry, sentiment)

        st.success(f"Entry saved! Sentiment Score: {sentiment:.2f}")

    else:

        st.warning("Please write something in your journal.")


# Load and plot

data = load_data()

if not data.empty:

    st.subheader("๐Ÿ“Š Weekly Sentiment Trend")

    plot_sentiment(data)


Code-Based Game Launcher

import tkinter as tk

from tkinter import filedialog, messagebox

import os

import subprocess
import sys


class GameLauncher:

    def __init__(self, root):

        self.root = root

        self.root.title("๐Ÿ•น️ Python Game Launcher")

        self.root.geometry("500x400")

        self.root.config(bg="#1e1e1e")


        self.games = []


        self.label = tk.Label(root, text="๐ŸŽฎ Your Python Games", font=("Helvetica", 16), fg="white", bg="#1e1e1e")

        self.label.pack(pady=10)


        self.game_listbox = tk.Listbox(root, width=50, height=15, font=("Courier", 10))

        self.game_listbox.pack(pady=10)


        self.launch_button = tk.Button(root, text="🚀 Launch Game", command=self.launch_game, bg="#28a745", fg="white", font=("Helvetica", 12))

        self.launch_button.pack(pady=5)


        self.load_button = tk.Button(root, text="📂 Load Games Folder", command=self.load_games, bg="#007bff", fg="white", font=("Helvetica", 12))

        self.load_button.pack(pady=5)


    def load_games(self):

        folder_path = filedialog.askdirectory(title="Select Game Folder")

        if not folder_path:

            return


        self.games = []

        self.game_listbox.delete(0, tk.END)


        for file in os.listdir(folder_path):

            if file.endswith(".py"):

                self.games.append(os.path.join(folder_path, file))

                self.game_listbox.insert(tk.END, file)


        if not self.games:

            messagebox.showinfo("No Games Found", "No Python (.py) files found in the selected folder.")


    def launch_game(self):

        selected_index = self.game_listbox.curselection()

        if not selected_index:

            messagebox.showwarning("No Selection", "Please select a game to launch.")

            return


        game_path = self.games[selected_index[0]]

        try:

            subprocess.Popen(["python", game_path], shell=True)

        except Exception as e:

            messagebox.showerror("Error", f"Failed to launch the game:\n{e}")



# Run the launcher

if __name__ == "__main__":

    root = tk.Tk()

    app = GameLauncher(root)

    root.mainloop()


PDF Translator

import fitz  # PyMuPDF

from googletrans import Translator

from reportlab.pdfgen import canvas

from reportlab.lib.pagesizes import A4

import tkinter as tk

from tkinter import filedialog, simpledialog, messagebox



def extract_text_from_pdf(pdf_path):

    doc = fitz.open(pdf_path)

    full_text = ""

    for page in doc:

        full_text += page.get_text()

    doc.close()

    return full_text



def translate_text(text, dest_lang='fr'):

    translator = Translator()

    try:

        translated = translator.translate(text, dest=dest_lang)

        return translated.text

    except Exception as e:

        print("Translation error:", e)

        return None



def save_text_as_pdf(text, output_path):

    c = canvas.Canvas(output_path, pagesize=A4)

    width, height = A4

    lines = text.split('\n')

    y = height - 40


    for line in lines:

        if y < 40:  # new page

            c.showPage()

            y = height - 40

        c.drawString(40, y, line)

        y -= 15


    c.save()



def run_translator():

    pdf_path = filedialog.askopenfilename(title="Select PDF", filetypes=[("PDF files", "*.pdf")])

    if not pdf_path:

        return


    lang_code = simpledialog.askstring("Language Code", "Enter target language code (e.g., 'es' for Spanish, 'de' for German):")

    if not lang_code:

        return


    try:

        extracted_text = extract_text_from_pdf(pdf_path)

        messagebox.showinfo("Info", "Text extracted. Translating...")


        translated_text = translate_text(extracted_text, dest_lang=lang_code)

        if not translated_text:

            messagebox.showerror("Error", "Translation failed.")

            return


        save_path = filedialog.asksaveasfilename(defaultextension=".pdf", filetypes=[("PDF files", "*.pdf")])

        if not save_path:

            return


        save_text_as_pdf(translated_text, save_path)

        messagebox.showinfo("Success", f"Translated PDF saved at:\n{save_path}")

    except Exception as e:

        messagebox.showerror("Error", str(e))



# GUI

root = tk.Tk()

root.title(" PDF Translator")

root.geometry("400x200")


label = tk.Label(root, text="PDF Translator", font=("Arial", 16))

label.pack(pady=20)


translate_btn = tk.Button(root, text="Select and Translate PDF", command=run_translator, bg="#007BFF", fg="white", font=("Arial", 12))

translate_btn.pack(pady=10)


root.mainloop()
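
save_text_as_pdf() draws each translated line as-is, so long paragraphs can run off the right edge of the page. A minimal wrapping helper, assuming roughly 95 characters fit per A4 line at the default font (adjust to taste), can be applied before drawing:

import textwrap

def wrap_lines(text, width=95):
    wrapped = []
    for line in text.split('\n'):
        wrapped.extend(textwrap.wrap(line, width=width) or [""])  # keep blank lines
    return wrapped

# e.g. save_text_as_pdf("\n".join(wrap_lines(translated_text)), save_path)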


Real-Time Location Tracker

import tkinter as tk

from tkinter import messagebox

import requests

import folium

import webbrowser

from geopy.geocoders import Nominatim

import os


def get_location():

    try:

        response = requests.get("https://ipinfo.io/json")

        data = response.json()

        loc = data['loc'].split(',')

        latitude = float(loc[0])

        longitude = float(loc[1])

        city = data.get('city', 'Unknown')

        return latitude, longitude, city

    except Exception as e:

        messagebox.showerror("Error", f"Could not get location.\n{str(e)}")

        return None, None, None


def show_location():

    lat, lon, city = get_location()

    if lat is None or lon is None:

        return


    # Reverse geocode to get address

    geolocator = Nominatim(user_agent="geoapiExercises")

    location = geolocator.reverse((lat, lon), language="en")

    address = location.address if location else "Address not found"


    # Show location on map

    map_obj = folium.Map(location=[lat, lon], zoom_start=14)

    folium.Marker([lat, lon], popup=f"{address}", tooltip="You are here").add_to(map_obj)


    map_file = "real_time_location.html"

    map_obj.save(map_file)

    webbrowser.open(f"file://{os.path.abspath(map_file)}")


    location_label.config(text=f"City: {city}\nLatitude: {lat}\nLongitude: {lon}\n\n{address}")


# GUI

app = tk.Tk()

app.title("๐Ÿ“ Real-Time Location Tracker")

app.geometry("500x300")


tk.Label(app, text="Click the button to track your location", font=("Arial", 14)).pack(pady=20)


tk.Button(app, text="Track My Location", command=show_location, bg="#1E90FF", fg="white", font=("Arial", 12)).pack(pady=10)


location_label = tk.Label(app, text="", font=("Arial", 10), justify="left", wraplength=450)

location_label.pack(pady=20)


app.mainloop()


Podcast Downloader & Organizer

import tkinter as tk

from tkinter import filedialog, messagebox, Listbox, Scrollbar

import feedparser

import requests

import os


def fetch_episodes():

    url = url_entry.get()

    if not url:

        messagebox.showerror("Error", "Please enter a podcast RSS feed URL.")

        return

    

    global feed

    feed = feedparser.parse(url)

    episodes_list.delete(0, tk.END)


    if not feed.entries:

        messagebox.showerror("Error", "No episodes found or invalid RSS URL.")

        return


    for i, entry in enumerate(feed.entries[:20]):

        title = entry.title

        episodes_list.insert(tk.END, f"{i+1}. {title}")


def download_selected():

    selected = episodes_list.curselection()

    if not selected:

        messagebox.showerror("Error", "Please select an episode to download.")

        return


    entry = feed.entries[selected[0]]

    title = feed.feed.title

    episode_title = entry.title

    audio_url = entry.enclosures[0].href if entry.enclosures else None


    if not audio_url:

        messagebox.showerror("Error", "No audio found in this episode.")

        return


    folder = os.path.join("Podcasts", title)

    os.makedirs(folder, exist_ok=True)

    filename = os.path.join(folder, f"{episode_title}.mp3")


    try:

        r = requests.get(audio_url, stream=True)

        with open(filename, 'wb') as f:

            for chunk in r.iter_content(1024):

                f.write(chunk)

        messagebox.showinfo("Success", f"Downloaded: {episode_title}")

    except Exception as e:

        messagebox.showerror("Download Failed", str(e))


# GUI setup

app = tk.Tk()

app.title("๐ŸŽ™️ Podcast Downloader & Organizer")

app.geometry("500x400")


tk.Label(app, text="Enter Podcast RSS Feed URL:").pack(pady=5)

url_entry = tk.Entry(app, width=60)

url_entry.pack(pady=5)


tk.Button(app, text="Fetch Episodes", command=fetch_episodes).pack(pady=5)


frame = tk.Frame(app)

frame.pack(pady=10, fill=tk.BOTH, expand=True)


scrollbar = Scrollbar(frame)

scrollbar.pack(side=tk.RIGHT, fill=tk.Y)


episodes_list = Listbox(frame, yscrollcommand=scrollbar.set, width=80)

episodes_list.pack(padx=10, pady=10, expand=True)

scrollbar.config(command=episodes_list.yview)


tk.Button(app, text="Download Selected Episode", command=download_selected, bg="#4CAF50", fg="white").pack(pady=10)


app.mainloop()
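
Episode titles often contain characters such as / or ? that are not valid in file names, which makes the open() call in download_selected() fail. A small helper (sketch) that can be applied to episode_title and the feed title before building the path:

import re

def safe_filename(name):
    # Replace characters that are illegal on common file systems
    return re.sub(r'[\\/*?:"<>|]', "_", name).strip()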


Invoice Data Extractor

import tkinter as tk

from tkinter import filedialog, messagebox

from PIL import Image

import pytesseract

from pdf2image import convert_from_path

import re

import os


# Optional: Set Tesseract path

# pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'


def extract_text_from_file(file_path):

    if file_path.lower().endswith('.pdf'):

        images = convert_from_path(file_path, dpi=300)

        text = ""

        for image in images:

            text += pytesseract.image_to_string(image)

        return text

    elif file_path.lower().endswith(('.png', '.jpg', '.jpeg')):

        image = Image.open(file_path)

        return pytesseract.image_to_string(image)

    else:

        return ""


def extract_invoice_data(text):

    invoice_number = re.search(r'Invoice\s*#?:?\s*(\w+)', text, re.IGNORECASE)

    date = re.search(r'Date\s*:?(\s*\d{1,2}/\d{1,2}/\d{2,4})', text, re.IGNORECASE)

    total = re.search(r'Total\s*:?[\s$]*(\d+[\.,]?\d*)', text, re.IGNORECASE)


    return {

        "Invoice Number": invoice_number.group(1) if invoice_number else "Not Found",

        "Date": date.group(1).strip() if date else "Not Found",

        "Total Amount": total.group(1) if total else "Not Found"

    }


def process_invoice():

    file_path = filedialog.askopenfilename(title="Select Invoice", filetypes=[("PDF/Image Files", "*.pdf *.jpg *.png *.jpeg")])

    if not file_path:

        return


    text = extract_text_from_file(file_path)

    data = extract_invoice_data(text)


    result = f"""

    ๐Ÿ“„ File: {os.path.basename(file_path)}

    ๐Ÿงพ Invoice Number: {data['Invoice Number']}

    ๐Ÿ“… Date: {data['Date']}

    ๐Ÿ’ฐ Total: {data['Total Amount']}

    """

    messagebox.showinfo("Extracted Invoice Data", result)


# GUI Setup

app = tk.Tk()

app.title("๐Ÿงพ Invoice Data Extractor")

app.geometry("400x200")

app.configure(bg="#f9f9f9")


label = tk.Label(app, text="Click below to select an invoice PDF or image", bg="#f9f9f9", font=("Helvetica", 12))

label.pack(pady=20)


btn = tk.Button(app, text="Select Invoice", command=process_invoice, bg="#4CAF50", fg="white", font=("Helvetica", 12), padx=10, pady=5)

btn.pack()


app.mainloop()
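
The three field patterns in extract_invoice_data() are intentionally loose; here is what they pull out of a made-up OCR result (illustrative only, real invoices vary a lot):

sample = "Invoice #: INV2034\nDate: 12/04/2025\nTotal: $149.99"
print(extract_invoice_data(sample))
# {'Invoice Number': 'INV2034', 'Date': '12/04/2025', 'Total Amount': '149.99'}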


File Locker CLI

import argparse

import getpass

import os

from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

from cryptography.hazmat.backends import default_backend

from cryptography.hazmat.primitives import hashes

from cryptography.fernet import Fernet

import base64


def derive_key(password: str, salt: bytes) -> bytes:

    kdf = PBKDF2HMAC(

        algorithm=hashes.SHA256(),

        length=32,

        salt=salt,

        iterations=390000,

        backend=default_backend()

    )

    return base64.urlsafe_b64encode(kdf.derive(password.encode()))


def encrypt_file(filepath, password):

    with open(filepath, 'rb') as file:

        data = file.read()


    salt = os.urandom(16)

    key = derive_key(password, salt)

    fernet = Fernet(key)

    encrypted_data = fernet.encrypt(data)


    with open(filepath + ".locked", 'wb') as file:

        file.write(salt + encrypted_data)


    os.remove(filepath)

    print(f"๐Ÿ”’ File encrypted as {filepath}.locked")


def decrypt_file(filepath, password):

    with open(filepath, 'rb') as file:

        content = file.read()


    salt = content[:16]

    encrypted_data = content[16:]

    key = derive_key(password, salt)

    fernet = Fernet(key)


    try:

        decrypted_data = fernet.decrypt(encrypted_data)

    except Exception:

        print("❌ Wrong password or corrupted file.")

        return


    original_path = filepath.replace(".locked", "")

    with open(original_path, 'wb') as file:

        file.write(decrypted_data)


    os.remove(filepath)

    print(f"๐Ÿ”“ File decrypted as {original_path}")


def main():

    parser = argparse.ArgumentParser(description="๐Ÿ” File Locker CLI")

    parser.add_argument("action", choices=["lock", "unlock"], help="Lock or unlock the file")

    parser.add_argument("filepath", help="Path to the file")


    args = parser.parse_args()

    password = getpass.getpass("Enter password: ")


    if args.action == "lock":

        encrypt_file(args.filepath, password)

    elif args.action == "unlock":

        decrypt_file(args.filepath, password)


if __name__ == "__main__":

    main()


Plagiarism Detector

import difflib

from sklearn.feature_extraction.text import TfidfVectorizer

from sklearn.metrics.pairwise import cosine_similarity


def read_file(filename):

    with open(filename, 'r', encoding='utf-8') as file:

        return file.read()


def basic_diff_score(text1, text2):

    seq = difflib.SequenceMatcher(None, text1, text2)

    return round(seq.ratio() * 100, 2)


def nlp_cosine_similarity(text1, text2):

    tfidf = TfidfVectorizer().fit_transform([text1, text2])

    score = cosine_similarity(tfidf[0:1], tfidf[1:2])

    return round(score[0][0] * 100, 2)


def main():

    file1 = input("Enter path to first file: ")

    file2 = input("Enter path to second file: ")


    text1 = read_file(file1)

    text2 = read_file(file2)


    basic_score = basic_diff_score(text1, text2)

    nlp_score = nlp_cosine_similarity(text1, text2)


    print("\n--- Plagiarism Detection Result ---")

    print(f"Simple Match (difflib): {basic_score}%")

    print(f"Semantic Match (TF-IDF Cosine Similarity): {nlp_score}%")


    if nlp_score > 80:

        print("⚠️ High similarity detected. Possible plagiarism.")

    elif nlp_score > 50:

        print("⚠️ Moderate similarity. Review recommended.")

    else:

        print("✅ Low similarity. Likely original.")


if __name__ == "__main__":

    main()


Online Exam Proctoring Tool

import cv2

import face_recognition

import datetime


def log_alert(msg):

    with open("proctoring_log.txt", "a") as f:

        f.write(f"{datetime.datetime.now()} - {msg}\n")


def main():

    cap = cv2.VideoCapture(0)


    if not cap.isOpened():

        print("Could not access webcam.")

        return


    print("[INFO] Proctoring started. Press 'q' to quit.")


    while True:

        ret, frame = cap.read()

        if not ret:

            break


        # Resize frame for faster processing

        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)  # face_recognition expects RGB; avoids negative-stride view issues


        # Detect all faces

        face_locations = face_recognition.face_locations(rgb_small_frame)

        face_count = len(face_locations)


        # Draw rectangles around faces

        for (top, right, bottom, left) in face_locations:

            top, right, bottom, left = top*4, right*4, bottom*4, left*4

            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)


        # Check for multiple faces

        if face_count > 1:

            cv2.putText(frame, "ALERT: Multiple Faces Detected!", (10, 30),

                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)

            log_alert("Multiple faces detected!")


        # Show output

        cv2.imshow("Exam Proctoring Feed", frame)


        # Break on 'q' key

        if cv2.waitKey(1) & 0xFF == ord('q'):

            break


    cap.release()

    cv2.destroyAllWindows()

    print("[INFO] Proctoring ended.")


if __name__ == "__main__":

    main()


Budget Planner with Visualization

import pandas as pd

import matplotlib.pyplot as plt

from tkinter import *

from tkinter import ttk, messagebox

import os

from datetime import datetime


# Create main window

root = Tk()

root.title("Budget Planner with Visualization")

root.geometry("800x600")


# CSV file name

filename = "budget_data.csv"


# Load existing data

if os.path.exists(filename):

    df = pd.read_csv(filename)

else:

    df = pd.DataFrame(columns=["Date", "Category", "Type", "Amount"])


# Function to add entry

def add_entry():

    date = date_entry.get()

    category = category_entry.get()

    ttype = type_combo.get()

    amount = amount_entry.get()


    if not date or not category or not ttype or not amount:

        messagebox.showerror("Input Error", "All fields are required.")

        return


    try:

        datetime.strptime(date, "%Y-%m-%d")

        amount = float(amount)

    except:

        messagebox.showerror("Input Error", "Invalid date or amount.")

        return


    new_entry = {"Date": date, "Category": category, "Type": ttype, "Amount": amount}

    global df

    df = pd.concat([df, pd.DataFrame([new_entry])], ignore_index=True)

    df.to_csv(filename, index=False)

    update_table()

    messagebox.showinfo("Success", "Entry added successfully!")


# Function to update table

def update_table():

    for row in tree.get_children():

        tree.delete(row)

    for index, row in df.iterrows():

        tree.insert("", "end", values=list(row))


# Function to show chart

def show_chart():

    try:

        expense_df = df[df["Type"] == "Expense"]

        summary = expense_df.groupby("Category")["Amount"].sum()

        summary.plot.pie(autopct="%1.1f%%", startangle=90)

        plt.title("Expenses by Category")

        plt.ylabel("")

        plt.show()

    except Exception as e:

        messagebox.showerror("Chart Error", str(e))


# Input Form

form_frame = Frame(root)

form_frame.pack(pady=10)


Label(form_frame, text="Date (YYYY-MM-DD)").grid(row=0, column=0, padx=5)

date_entry = Entry(form_frame)

date_entry.grid(row=0, column=1, padx=5)


Label(form_frame, text="Category").grid(row=0, column=2, padx=5)

category_entry = Entry(form_frame)

category_entry.grid(row=0, column=3, padx=5)


Label(form_frame, text="Type").grid(row=1, column=0, padx=5)

type_combo = ttk.Combobox(form_frame, values=["Income", "Expense"], state="readonly")

type_combo.grid(row=1, column=1, padx=5)


Label(form_frame, text="Amount").grid(row=1, column=2, padx=5)

amount_entry = Entry(form_frame)

amount_entry.grid(row=1, column=3, padx=5)


Button(form_frame, text="Add Entry", command=add_entry).grid(row=2, column=0, columnspan=4, pady=10)


# Treeview

tree = ttk.Treeview(root, columns=["Date", "Category", "Type", "Amount"], show="headings")

for col in ["Date", "Category", "Type", "Amount"]:

    tree.heading(col, text=col)

    tree.column(col, width=150)

tree.pack(fill=BOTH, expand=True, pady=10)


# Show chart button

Button(root, text="Show Expense Chart", command=show_chart).pack(pady=10)


# Load initial table

update_table()


root.mainloop()


Website Availability & Uptime Tracker

import requests

import schedule

import time

import smtplib

from email.mime.text import MIMEText


# List of websites to track

websites = {

    "Google": "https://www.google.com",

    "My Portfolio": "https://yourportfolio.com"

}


# Email Config

EMAIL_ADDRESS = "your_email@gmail.com"

EMAIL_PASSWORD = "your_app_password"

TO_EMAIL = "recipient_email@example.com"


def send_email_alert(site):

    msg = MIMEText(f"Alert: {site} is DOWN!")

    msg["Subject"] = f"๐Ÿšจ {site} is Down!"

    msg["From"] = EMAIL_ADDRESS

    msg["To"] = TO_EMAIL


    with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:

        smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)

        smtp.send_message(msg)

        print(f"Email sent: {site} is down.")


def check_sites():

    for name, url in websites.items():

        try:

            response = requests.get(url, timeout=5)

            if response.status_code != 200:

                print(f"[{name}] Status: {response.status_code}")

                send_email_alert(name)

            else:

                print(f"[{name}] is up ✅")

        except requests.exceptions.RequestException:

            print(f"[{name}] is unreachable ❌")

            send_email_alert(name)


# Check every 5 minutes

schedule.every(5).minutes.do(check_sites)


print("๐Ÿ” Website Uptime Tracker Started...")

while True:

    schedule.run_pending()

    time.sleep(1)



Twilio SMS Integration


from twilio.rest import Client

account_sid = "your_sid"
auth_token = "your_token"
twilio_number = "+1234567890"
receiver_number = "+91999xxxxxxx"

def send_sms_alert(site):
    client = Client(account_sid, auth_token)
    client.messages.create(
        body=f"๐Ÿšจ {site} is DOWN!",
        from_=twilio_number,
        to=receiver_number
    )
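
To use this alongside the email alerts, the tracker above only needs to call it in the same places, for example (sketch):

def notify(site):
    send_email_alert(site)
    send_sms_alert(site)

# ...then call notify(name) instead of send_email_alert(name) inside check_sites().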

Document Similarity Checker

import streamlit as st

from sklearn.feature_extraction.text import TfidfVectorizer

from sklearn.metrics.pairwise import cosine_similarity

import spacy


nlp = spacy.load("en_core_web_sm")


st.title("๐Ÿ“„ Document Similarity Checker")


def preprocess(text):

    doc = nlp(text)

    return " ".join([token.lemma_ for token in doc if not token.is_stop and token.is_alpha])


file1 = st.file_uploader("Upload First Document", type=["txt"])

file2 = st.file_uploader("Upload Second Document", type=["txt"])


if file1 and file2:

    text1 = file1.read().decode("utf-8")

    text2 = file2.read().decode("utf-8")


    clean_text1 = preprocess(text1)

    clean_text2 = preprocess(text2)


    tfidf = TfidfVectorizer()

    tfidf_matrix = tfidf.fit_transform([clean_text1, clean_text2])

    sim_score = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]


    st.subheader("๐Ÿงฎ Similarity Score")

    st.write(f"**{sim_score:.2f}** (1 = identical, 0 = completely different)")


    if sim_score > 0.75:

        st.success("The documents are quite similar! ๐ŸŸข")

    elif sim_score > 0.4:

        st.info("The documents are moderately similar. ๐ŸŸก")

    else:

        st.warning("The documents are quite different. ๐Ÿ”ด")


Smart Calendar CLI App

import click

import requests

from datetime import datetime, timedelta


# Placeholder for real GCal and weather integration

EVENTS = [

    {"title": "Standup Meeting", "time": "10:00", "date": "2025-04-16", "location": "Remote"},

    {"title": "Doctor Appointment", "time": "15:00", "date": "2025-04-16", "location": "City Clinic"},

]


@click.group()

def cli():

    pass


@cli.command()

def today():

    click.echo("๐Ÿ“… Today's Events:")

    today = datetime.today().strftime('%Y-%m-%d')

    for e in EVENTS:

        if e["date"] == today:

            click.echo(f"- {e['time']}: {e['title']} ๐Ÿ“ {e['location']}")


@cli.command()

@click.option('--title', prompt='Event title')

@click.option('--date', prompt='Event date (YYYY-MM-DD)')

@click.option('--time', prompt='Event time (HH:MM)')

@click.option('--location', prompt='Event location')

def add(title, date, time, location):

    EVENTS.append({"title": title, "date": date, "time": time, "location": location})

    click.echo("✅ Event created successfully!")


@cli.command()

def forecast():

    if not EVENTS:

        click.echo("No events to check weather for.")

        return

    event = EVENTS[-1]

    click.echo(f"๐ŸŒฆ Forecast for: {event['title']}")

    weather = get_weather(event['location'])

    click.echo(f"๐Ÿ“ Location: {event['location']} | Date: {event['date']}")

    click.echo(f"☁️ Weather Forecast: {weather}")


def get_weather(city):

    API_KEY = "your_openweathermap_api_key"

    url = f"https://api.openweathermap.org/data/2.5/weather?q={city}&appid={API_KEY}&units=metric"

    r = requests.get(url)

    if r.status_code == 200:

        data = r.json()

        return f"{data['weather'][0]['main']}, {data['main']['temp']}°C"

    return "Weather data unavailable"


if __name__ == '__main__':

    cli()


Voice-Controlled Notes App

import speech_recognition as sr

import pyttsx3

import os


notes = {}


engine = pyttsx3.init()


def speak(text):

    engine.say(text)

    engine.runAndWait()


def listen_command():

    r = sr.Recognizer()

    with sr.Microphone() as source:

        speak("Listening...")

        audio = r.listen(source)

    try:

        command = r.recognize_google(audio)

        return command.lower()

    except:

        speak("Sorry, I didn't catch that.")

        return ""


def create_note():

    speak("What should I name the note?")

    title = listen_command()

    speak("What is the content?")

    content = listen_command()

    notes[title] = content

    speak(f"Note '{title}' created.")


def read_notes():

    if notes:

        for title, content in notes.items():

            speak(f"{title}: {content}")

    else:

        speak("No notes found.")


def delete_note():

    speak("Which note should I delete?")

    title = listen_command()

    if title in notes:

        del notes[title]

        speak(f"Note '{title}' deleted.")

    else:

        speak("Note not found.")


def main():

    speak("Voice Notes App Started.")

    while True:

        speak("Say a command: create, read, delete, or exit.")

        command = listen_command()


        if "create" in command:

            create_note()

        elif "read" in command:

            read_notes()

        elif "delete" in command:

            delete_note()

        elif "exit" in command:

            speak("Goodbye!")

            break

        else:

            speak("Unknown command.")


if __name__ == "__main__":

    main()


Chemistry Molecule Visualizer

import streamlit as st

from rdkit import Chem

from rdkit.Chem import AllChem, Draw, Descriptors

import py3Dmol


def mol_to_3d_view(smiles):

    mol = Chem.MolFromSmiles(smiles)

    mb = Chem.AddHs(mol)

    AllChem.EmbedMolecule(mb)  # 3D embedding lives in AllChem, not Chem

    mol_block = Chem.MolToMolBlock(mb)


    viewer = py3Dmol.view(width=400, height=400)

    viewer.addModel(mol_block, 'mol')

    viewer.setStyle({'stick': {}})

    viewer.zoomTo()

    return viewer


st.title("๐Ÿงช Chemistry Molecule Visualizer")


smiles = st.text_input("Enter SMILES string", "CC(=O)O")  # Acetic Acid


if smiles:

    mol = Chem.MolFromSmiles(smiles)

    

    if mol:

        st.subheader("๐Ÿ“Œ Molecular Structure (2D)")

        st.image(Draw.MolToImage(mol, size=(300, 300)))


        st.subheader("๐Ÿ”ฌ Properties")

        st.markdown(f"**Formula**: {Chem.rdMolDescriptors.CalcMolFormula(mol)}")

        st.markdown(f"**Molecular Weight**: {Descriptors.MolWt(mol):.2f} g/mol")


        st.subheader("๐Ÿงฌ 3D Structure")

        viewer = mol_to_3d_view(smiles)

        viewer_html = viewer._make_html()

        st.components.v1.html(viewer_html, height=450)

    else:

        st.error("Invalid SMILES string. Try again.")


Receipt Text Extractor & Analyzer

OCR using pytesseract

from PIL import Image

import pytesseract


def extract_text_from_image(image_path):

    image = Image.open(image_path)

    text = pytesseract.image_to_string(image)

    return text

Parse Items and Prices with re

import re

def parse_items(raw_text):
    # Match lines like: "Bread 2.50" or "Milk ....... 1.25"
    pattern = r"([A-Za-z\s]+)\s+([\d]+\.\d{2})"
    matches = re.findall(pattern, raw_text)
    
    items = [{"item": item.strip(), "price": float(price)} for item, price in matches]
    
    total = sum(i["price"] for i in items)
    avg = total / len(items) if items else 0
    
    return items, total, avg
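
A quick check of the item/price pattern against a made-up bit of OCR output (real receipts are messier, so expect to tweak the regex):

sample = "Bread 2.50\nMilk 1.25\nEggs 3.10"
items, total, avg = parse_items(sample)
print(items)                            # [{'item': 'Bread', 'price': 2.5}, ...]
print(round(total, 2), round(avg, 2))   # 6.85 2.28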

(Optional) Step 3: Streamlit Interface


import streamlit as st
from utils.text_parser import extract_text_from_image, parse_items
import tempfile
import pandas as pd

st.title("๐Ÿงพ Receipt Text Extractor & Analyzer")
uploaded_file = st.file_uploader("Upload Receipt Image", type=["jpg", "png", "jpeg"])

if uploaded_file:
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(uploaded_file.read())
        tmp_path = tmp.name

    raw_text = extract_text_from_image(tmp_path)
    items, total, avg = parse_items(raw_text)

    df = pd.DataFrame(items)
    st.subheader("๐Ÿ›’ Items Detected:")
    st.table(df)

    st.markdown(f"**Total Cost:** ₹{total:.2f}")
    st.markdown(f"**Average Item Cost:** ₹{avg:.2f}")

    # Download as CSV
    csv = df.to_csv(index=False).encode()
    st.download_button("๐Ÿ“ฅ Download CSV", csv, "receipt_data.csv", "text/csv")

Music Genre Classifier

# utils/feature_extractor.py

import librosa

import numpy as np


def extract_features(file_path):

    y, sr = librosa.load(file_path, duration=30)

    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)

    mfccs_mean = np.mean(mfccs.T, axis=0)

    return mfccs_mean


from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import joblib

# Load features (extracted previously)
df = pd.read_csv("features_dataset.csv")  # Your dataset with MFCC + genre
X = df.drop('genre', axis=1)
y = df['genre']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

clf = RandomForestClassifier()
clf.fit(X_train, y_train)

# Save model
joblib.dump(clf, "model/genre_model.pkl")


# app.py
import streamlit as st
import joblib
import numpy as np
from utils.feature_extractor import extract_features
import tempfile

# Load model
model = joblib.load("model/genre_model.pkl")

st.title("๐ŸŽต Music Genre Classifier")
st.write("Upload a music file to predict its genre")

uploaded_file = st.file_uploader("Choose a file", type=["mp3", "wav"])

if uploaded_file:
    # Save temporarily
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(uploaded_file.read())
        tmp_path = tmp.name

    # Extract features and predict
    features = extract_features(tmp_path)
    prediction = model.predict([features])[0]
    proba = model.predict_proba([features])

    st.success(f"Predicted Genre: **{prediction}**")
    st.bar_chart(proba[0])

Custom Dictionary Builder

db.py

import sqlite3


def init_db():

    conn = sqlite3.connect("words.db")

    cursor = conn.cursor()

    cursor.execute('''CREATE TABLE IF NOT EXISTS dictionary (

                        id INTEGER PRIMARY KEY AUTOINCREMENT,

                        word TEXT,

                        language TEXT,

                        meaning TEXT,

                        synonyms TEXT,

                        audio_file TEXT)''')

    conn.commit()

    conn.close()


def add_word(word, lang, meaning, synonyms, audio_file):

    conn = sqlite3.connect("words.db")

    cursor = conn.cursor()

    cursor.execute("INSERT INTO dictionary (word, language, meaning, synonyms, audio_file) VALUES (?, ?, ?, ?, ?)",

                   (word, lang, meaning, synonyms, audio_file))

    conn.commit()

    conn.close()


def search_word(word):

    conn = sqlite3.connect("words.db")

    cursor = conn.cursor()

    cursor.execute("SELECT * FROM dictionary WHERE word = ?", (word,))

    result = cursor.fetchone()

    conn.close()

    return result

tts.py

from gtts import gTTS

import os


def generate_audio(word, lang='en'):

    tts = gTTS(text=word, lang=lang)

    audio_file = f"audio/{word}_{lang}.mp3"

    tts.save(audio_file)

    return audio_file

app.py

import tkinter as tk

from db import init_db, add_word, search_word

from tts import generate_audio

import os

from playsound import playsound


init_db()


def submit_word():

    word = entry_word.get()

    lang = entry_lang.get()

    meaning = entry_meaning.get()

    synonyms = entry_synonyms.get()

    audio_path = generate_audio(word, lang)

    add_word(word, lang, meaning, synonyms, audio_path)

    label_status.config(text="✅ Word added!")


def play_audio():

    word = entry_word.get()

    result = search_word(word)

    if result and os.path.exists(result[5]):

        playsound(result[5])

    else:

        label_status.config(text="❌ Audio not found.")


# GUI Setup

window = tk.Tk()

window.title("๐Ÿ“š Custom Dictionary Builder")

window.geometry("400x400")


entry_word = tk.Entry(window)

entry_word.insert(0, "Word")

entry_word.pack(pady=5)


entry_lang = tk.Entry(window)

entry_lang.insert(0, "Language Code (e.g., en, es)")

entry_lang.pack(pady=5)


entry_meaning = tk.Entry(window)

entry_meaning.insert(0, "Meaning")

entry_meaning.pack(pady=5)


entry_synonyms = tk.Entry(window)

entry_synonyms.insert(0, "Synonyms")

entry_synonyms.pack(pady=5)


tk.Button(window, text="Add Word", command=submit_word).pack(pady=10)

tk.Button(window, text="Play Pronunciation", command=play_audio).pack(pady=5)


label_status = tk.Label(window, text="")

label_status.pack(pady=10)


window.mainloop()



Language Codes for gTTS:

  • English: en

  • Hindi: hi

  • Spanish: es

  • French: fr

  • Malayalam: ml

  • Tamil: ta
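
For example, a short sketch of generating pronunciations with these codes (requires an internet connection for gTTS; the words are illustrative):

from tts import generate_audio

generate_audio("namaste", lang="hi")   # saves audio/namaste_hi.mp3
generate_audio("gracias", lang="es")   # saves audio/gracias_es.mp3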

Mindfulness & Focus Timer App

main.py

import tkinter as tk

from timer import start_pomodoro

from breathing import start_breathing

from prompts import get_prompt


window = tk.Tk()

window.title("🧠 Mindfulness & Focus Timer")

window.geometry("400x300")


label = tk.Label(window, text="Welcome to Focus Time!", font=("Helvetica", 16))

label.pack(pady=10)


tk.Button(window, text="Start Pomodoro", command=start_pomodoro).pack(pady=10)

tk.Button(window, text="Breathing Exercise", command=start_breathing).pack(pady=10)

tk.Button(window, text="Get Mindfulness Prompt", command=lambda: label.config(text=get_prompt())).pack(pady=10)


window.mainloop()

timer.py

import time
import pygame
import threading

def play_sound():
    pygame.init()
    pygame.mixer.init()
    pygame.mixer.music.load("sounds/ding.wav")
    pygame.mixer.music.play()

def start_pomodoro():
    def run():
        print("Focus time started!")
        for i in range(25 * 60, 0, -1):
            mins, secs = divmod(i, 60)
            print(f"{mins:02d}:{secs:02d}", end='\r')
            time.sleep(1)
        play_sound()
        print("\nTime for a break!")

    threading.Thread(target=run, daemon=True).start()  # daemon thread so it doesn't keep the app alive after the window closes
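
The countdown above runs in a background thread and prints to the console. A minimal alternative sketch, assuming the timer should appear inside the Tkinter window itself, schedules the countdown on the main thread with window.after (the standalone window and label name are illustrative):

import tkinter as tk

window = tk.Tk()
window.title("Pomodoro (label-based sketch)")
label_timer = tk.Label(window, text="25:00", font=("Helvetica", 24))
label_timer.pack(pady=20)

def tick(seconds_left):
    mins, secs = divmod(seconds_left, 60)
    label_timer.config(text=f"{mins:02d}:{secs:02d}")
    if seconds_left > 0:
        window.after(1000, tick, seconds_left - 1)  # re-schedule on the Tk event loop, no thread needed
    else:
        label_timer.config(text="Time for a break!")

tick(25 * 60)
window.mainloop()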

breathing.py

import time
import threading

def start_breathing():
    def run():
        print("Follow the breathing pattern:")
        for _ in range(4):
            print("Inhale... 🫁")
            time.sleep(4)
            print("Hold... ✋")
            time.sleep(4)
            print("Exhale... 😮‍💨")
            time.sleep(4)
    threading.Thread(target=run, daemon=True).start()  # daemon thread so it doesn't keep the app alive after the window closes

prompts.py

import random

prompts = [
    "Breathe in clarity, breathe out stress.",
    "Focus on one thing at a time.",
    "You are in control of your day.",
    "Be here now. 🌱",
    "Let go of what you can't control."
]

def get_prompt():
    return random.choice(prompts)


AI Image Caption Generator

import numpy as np

import tensorflow as tf

from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input

from tensorflow.keras.preprocessing import image

from tensorflow.keras.models import Model, load_model

import pickle

import cv2

import streamlit as st

from PIL import Image


# Load Pretrained InceptionV3 Model for Image Feature Extraction

base_model = InceptionV3(weights='imagenet')

model = Model(inputs=base_model.input, outputs=base_model.layers[-2].output)  # penultimate pooling layer -> 2048-dim feature vector


# Load Pretrained Captioning Model

captioning_model = load_model("image_captioning_model.h5")


# Load Tokenizer & Word Mappings

with open("tokenizer.pickle", "rb") as handle:

    tokenizer = pickle.load(handle)


max_length = 35  # Max caption length


# Extract Features from Image

def extract_features(img_path):

    img = image.load_img(img_path, target_size=(299, 299))

    img = image.img_to_array(img)

    img = np.expand_dims(img, axis=0)

    img = preprocess_input(img)

    feature_vector = model.predict(img)

    return feature_vector


# Generate Caption

def generate_caption(img_path):

    image_features = extract_features(img_path)

    caption = "startseq"

    

    for i in range(max_length):

        sequence = [tokenizer.word_index[word] for word in caption.split() if word in tokenizer.word_index]

        sequence = tf.keras.preprocessing.sequence.pad_sequences([sequence], maxlen=max_length)

        predicted_index = np.argmax(captioning_model.predict([image_features, sequence]), axis=-1)

        word = tokenizer.index_word.get(predicted_index[0], "")

        if word == "endseq":

            break

        caption += " " + word

    

    return caption.replace("startseq", "").replace("endseq", "").strip()
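
The loop above decodes greedily: each step feeds the image features plus the words generated so far, appends the single most probable next word, and stops at "endseq" or max_length. A toy, self-contained illustration of the same idea (predict_next is a hypothetical stand-in for captioning_model.predict):

import numpy as np

vocab = ["startseq", "a", "dog", "runs", "endseq"]

def predict_next(tokens):
    # Pretend model that always continues "startseq a dog runs endseq"
    order = {"startseq": "a", "a": "dog", "dog": "runs", "runs": "endseq"}
    probs = np.zeros(len(vocab))
    probs[vocab.index(order[tokens[-1]])] = 1.0
    return probs

caption = ["startseq"]
for _ in range(10):  # stands in for max_length
    next_word = vocab[int(np.argmax(predict_next(caption)))]
    if next_word == "endseq":
        break
    caption.append(next_word)

print(" ".join(caption[1:]))  # -> a dog runs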


# Streamlit Web Interface

st.title("🖼️ AI Image Caption Generator")

uploaded_file = st.file_uploader("Upload an image...", type=["jpg", "png", "jpeg"])


if uploaded_file is not None:

    img = Image.open(uploaded_file)

    st.image(img, caption="Uploaded Image", use_column_width=True)

    

    # Save the uploaded image temporarily

    img_path = "temp.jpg"

    img.save(img_path)

    

    # Generate Caption

    with st.spinner("Generating Caption..."):

        caption_text = generate_caption(img_path)

    

    st.subheader("📝 Generated Caption:")

    st.write(caption_text)


AI Resume Scorer

import fitz  # PyMuPDF for PDF parsing

import docx2txt

import spacy

import re

from collections import Counter

import tkinter as tk

from tkinter import filedialog, messagebox


# Load NLP Model (English)

nlp = spacy.load("en_core_web_sm")  # requires: python -m spacy download en_core_web_sm


# Job Description (Example)

job_description = """

We are looking for a Data Scientist with expertise in Python, Machine Learning, and Data Analysis.

Candidates must have experience with Pandas, NumPy, and Scikit-learn.

Strong communication and teamwork skills are required.

"""


# Function to extract text from PDF

def extract_text_from_pdf(pdf_path):

    text = ""

    doc = fitz.open(pdf_path)

    for page in doc:

        text += page.get_text()

    return text


# Function to extract text from DOCX

def extract_text_from_docx(docx_path):

    return docx2txt.process(docx_path)


# Function to clean and preprocess text

def clean_text(text):

    text = re.sub(r"\s+", " ", text)  # Remove extra spaces

    text = text.lower()  # Convert to lowercase

    return text


# Function to extract keywords using NLP

def extract_keywords(text):

    doc = nlp(text)

    keywords = [token.text for token in doc if token.is_alpha and not token.is_stop]

    return Counter(keywords)
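
score_resume below relies on Counter intersection: resume_keywords & job_keywords keeps each shared word with the minimum of its two counts. A tiny illustration with made-up counts:

from collections import Counter

resume_counts = Counter({"python": 3, "pandas": 1, "teamwork": 2})
job_counts = Counter({"python": 2, "pandas": 1, "sql": 1})

matched = resume_counts & job_counts   # Counter({'python': 2, 'pandas': 1})
print(sum(matched.values()))           # 3 matched keyword occurrences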


# Function to score the resume

def score_resume(resume_text, job_description):

    resume_keywords = extract_keywords(resume_text)

    job_keywords = extract_keywords(job_description)


    # Calculate Keyword Match Score

    matched_keywords = sum((resume_keywords & job_keywords).values())

    total_keywords = sum(job_keywords.values())

    keyword_score = (matched_keywords / total_keywords) * 100 if total_keywords else 0


    # Readability Score (Basic: average words per sentence, scaled and capped at 100)

    sentence_count = len(re.findall(r"[.!?]", resume_text))

    word_count = len(resume_text.split())

    readability_score = min((word_count / (sentence_count + 1)) * 2, 100)  # Simplified readability measure, kept in a 0-100 range


    # Final Score Calculation (Weighted Average)

    final_score = (keyword_score * 0.7) + (readability_score * 0.3)

    return round(final_score, 2), keyword_score, readability_score
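
A hedged alternative to raw keyword counting: compare TF-IDF vectors of the resume and the job description with cosine similarity from scikit-learn. This is a sketch, not wired into the GUI below, and the 0-100 scaling is an assumption:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def tfidf_match_score(resume_text, job_description):
    vectorizer = TfidfVectorizer(stop_words="english")
    vectors = vectorizer.fit_transform([resume_text, job_description])
    similarity = cosine_similarity(vectors[0], vectors[1])[0][0]
    return round(similarity * 100, 2)  # scale 0-1 similarity to a 0-100 score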


# GUI for File Upload

def upload_file():

    file_path = filedialog.askopenfilename(filetypes=[("PDF Files", "*.pdf"), ("Word Files", "*.docx")])

    

    if file_path:

        if file_path.endswith(".pdf"):

            resume_text = extract_text_from_pdf(file_path)

        elif file_path.endswith(".docx"):

            resume_text = extract_text_from_docx(file_path)

        else:

            messagebox.showerror("Error", "Unsupported file format!")

            return

        

        # Clean and score resume

        cleaned_resume = clean_text(resume_text)

        final_score, keyword_score, readability_score = score_resume(cleaned_resume, job_description)

        

        # Show results

        messagebox.showinfo("Resume Score", f"📄 Resume Score: {final_score}%\n\n"

                                             f"🔑 Keyword Match: {keyword_score:.2f}%\n"

                                             f"📖 Readability Score: {readability_score:.2f}%")


# GUI Setup

root = tk.Tk()

root.title("AI Resume Scorer")

root.geometry("300x200")


upload_btn = tk.Button(root, text="Upload Resume", command=upload_file, padx=10, pady=5)

upload_btn.pack(pady=20)


root.mainloop()


Job Application Tracker

import tkinter as tk

from tkinter import ttk, messagebox

import sqlite3

import pandas as pd

import smtplib


# Database Setup

conn = sqlite3.connect("job_tracker.db")

cursor = conn.cursor()

cursor.execute("""

    CREATE TABLE IF NOT EXISTS jobs (

        id INTEGER PRIMARY KEY AUTOINCREMENT,

        company TEXT,

        role TEXT,

        date_applied TEXT,

        status TEXT

    )

""")

conn.commit()


# GUI Application

class JobTrackerApp:

    def __init__(self, root):

        self.root = root

        self.root.title("Job Application Tracker")

        self.root.geometry("600x400")


        # Labels

        ttk.Label(root, text="Company:").grid(row=0, column=0)

        ttk.Label(root, text="Role:").grid(row=1, column=0)

        ttk.Label(root, text="Date Applied:").grid(row=2, column=0)

        ttk.Label(root, text="Status:").grid(row=3, column=0)


        # Entry Fields

        self.company_entry = ttk.Entry(root)

        self.role_entry = ttk.Entry(root)

        self.date_entry = ttk.Entry(root)

        self.status_combo = ttk.Combobox(root, values=["Pending", "Interview", "Rejected", "Hired"])

        

        self.company_entry.grid(row=0, column=1)

        self.role_entry.grid(row=1, column=1)

        self.date_entry.grid(row=2, column=1)

        self.status_combo.grid(row=3, column=1)


        # Buttons

        ttk.Button(root, text="Add Job", command=self.add_job).grid(row=4, column=0)

        ttk.Button(root, text="Show Jobs", command=self.show_jobs).grid(row=4, column=1)

        ttk.Button(root, text="Export to CSV", command=self.export_csv).grid(row=5, column=0)

        ttk.Button(root, text="Send Follow-up", command=self.send_followup).grid(row=5, column=1)


    def add_job(self):

        company = self.company_entry.get()

        role = self.role_entry.get()

        date = self.date_entry.get()

        status = self.status_combo.get()


        if not company or not role or not date or not status:

            messagebox.showerror("Error", "All fields are required!")

            return

        

        cursor.execute("INSERT INTO jobs (company, role, date_applied, status) VALUES (?, ?, ?, ?)", 

                       (company, role, date, status))

        conn.commit()

        messagebox.showinfo("Success", "Job Application Added!")


    def show_jobs(self):

        jobs_window = tk.Toplevel(self.root)

        jobs_window.title("Job Applications")

        tree = ttk.Treeview(jobs_window, columns=("ID", "Company", "Role", "Date", "Status"), show="headings")

        tree.heading("ID", text="ID")

        tree.heading("Company", text="Company")

        tree.heading("Role", text="Role")

        tree.heading("Date", text="Date Applied")

        tree.heading("Status", text="Status")

        tree.pack(fill="both", expand=True)


        cursor.execute("SELECT * FROM jobs")

        for row in cursor.fetchall():

            tree.insert("", "end", values=row)


    def export_csv(self):

        cursor.execute("SELECT * FROM jobs")

        data = cursor.fetchall()

        df = pd.DataFrame(data, columns=["ID", "Company", "Role", "Date Applied", "Status"])

        df.to_csv("job_applications.csv", index=False)

        messagebox.showinfo("Exported", "Job Applications saved as CSV!")


    def send_followup(self):

        email = "your-email@gmail.com"  # Change to your email

        password = "your-password"  # Use App Password for security


        cursor.execute("SELECT company, role FROM jobs WHERE status='Pending'")

        pending_jobs = cursor.fetchall()


        if not pending_jobs:

            messagebox.showinfo("No Follow-ups", "No pending applications to follow up on.")

            return

        

        msg = "Subject: Follow-up on Job Applications\n\n"

        msg += "Here are your pending job applications:\n"

        for company, role in pending_jobs:

            msg += f"- {role} at {company}\n"


        try:

            server = smtplib.SMTP("smtp.gmail.com", 587)

            server.starttls()

            server.login(email, password)

            server.sendmail(email, email, msg)

            server.quit()

            messagebox.showinfo("Email Sent", "Follow-up email sent successfully!")

        except Exception as e:

            messagebox.showerror("Error", f"Failed to send email: {e}")


# Run App

root = tk.Tk()

app = JobTrackerApp(root)

root.mainloop()
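

One hedged hardening note for send_followup: instead of hardcoding the Gmail address and App Password in the source, they can be read from environment variables (SMTP_EMAIL and SMTP_PASSWORD are assumed variable names):

import os
import smtplib

def send_message(subject, body):
    email = os.environ["SMTP_EMAIL"]        # set in your shell or .env, not in the code
    password = os.environ["SMTP_PASSWORD"]  # a Gmail App Password, not the account password
    msg = f"Subject: {subject}\n\n{body}"
    with smtplib.SMTP("smtp.gmail.com", 587) as server:
        server.starttls()
        server.login(email, password)
        server.sendmail(email, email, msg)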