Music Genre Classifier

# utils/feature_extractor.py

import librosa
import numpy as np


def extract_features(file_path):
    # Load up to 30 seconds of audio and summarise it as the mean MFCC vector
    y, sr = librosa.load(file_path, duration=30)
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
    mfccs_mean = np.mean(mfccs.T, axis=0)
    return mfccs_mean
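
The training script below expects a features_dataset.csv with one MFCC row per track plus a genre column. A minimal sketch of how that file could be built, assuming audio organised as genres/<genre_name>/<track> (this layout and the script name are illustrative, not part of the original project):

# build_dataset.py (illustrative)
import os
import pandas as pd
from utils.feature_extractor import extract_features

rows = []
for genre in os.listdir("genres"):
    genre_dir = os.path.join("genres", genre)
    if not os.path.isdir(genre_dir):
        continue
    for fname in os.listdir(genre_dir):
        if fname.endswith((".wav", ".mp3")):
            mfccs = extract_features(os.path.join(genre_dir, fname))
            rows.append(list(mfccs) + [genre])

columns = [f"mfcc_{i}" for i in range(40)] + ["genre"]
pd.DataFrame(rows, columns=columns).to_csv("features_dataset.csv", index=False)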


from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import joblib
import os

# Load features (extracted previously): MFCC columns plus a 'genre' label
df = pd.read_csv("features_dataset.csv")
X = df.drop('genre', axis=1)
y = df['genre']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

clf = RandomForestClassifier()
clf.fit(X_train, y_train)

# Save model (create the model/ directory if it doesn't exist yet)
os.makedirs("model", exist_ok=True)
joblib.dump(clf, "model/genre_model.pkl")


# app.py
import streamlit as st
import joblib
import pandas as pd
import tempfile
import os
from utils.feature_extractor import extract_features

# Load model
model = joblib.load("model/genre_model.pkl")

st.title("🎵 Music Genre Classifier")
st.write("Upload a music file to predict its genre")

uploaded_file = st.file_uploader("Choose a file", type=["mp3", "wav"])

if uploaded_file:
    # Save the upload to a temporary file, keeping its extension so librosa can decode it
    suffix = os.path.splitext(uploaded_file.name)[1]
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(uploaded_file.read())
        tmp_path = tmp.name

    # Extract features and predict
    features = extract_features(tmp_path)
    prediction = model.predict([features])[0]
    proba = model.predict_proba([features])[0]

    st.success(f"Predicted Genre: **{prediction}**")
    # Label the probability bars with the class names
    st.bar_chart(pd.Series(proba, index=model.classes_))
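
To try it, run the Streamlit app from the project root so the utils import and the model/genre_model.pkl path resolve:

streamlit run app.py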

Custom Dictionary Builder

db.py

import sqlite3


def init_db():
    conn = sqlite3.connect("words.db")
    cursor = conn.cursor()
    cursor.execute('''CREATE TABLE IF NOT EXISTS dictionary (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        word TEXT,
                        language TEXT,
                        meaning TEXT,
                        synonyms TEXT,
                        audio_file TEXT)''')
    conn.commit()
    conn.close()


def add_word(word, lang, meaning, synonyms, audio_file):
    conn = sqlite3.connect("words.db")
    cursor = conn.cursor()
    cursor.execute("INSERT INTO dictionary (word, language, meaning, synonyms, audio_file) VALUES (?, ?, ?, ?, ?)",
                   (word, lang, meaning, synonyms, audio_file))
    conn.commit()
    conn.close()


def search_word(word):
    conn = sqlite3.connect("words.db")
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM dictionary WHERE word = ?", (word,))
    result = cursor.fetchone()
    conn.close()
    return result
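
A quick round trip (a sketch; the sample values are made up) shows the row layout that the GUI later indexes by position, with audio_file at index 5:

from db import init_db, add_word, search_word

init_db()
add_word("hola", "es", "hello", "buenas", "audio/hola_es.mp3")
row = search_word("hola")
# row = (id, word, language, meaning, synonyms, audio_file)
print(row[5])  # audio/hola_es.mp3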

tts.py

from gtts import gTTS
import os


def generate_audio(word, lang='en'):
    # Make sure the audio/ output directory exists before saving
    os.makedirs("audio", exist_ok=True)
    tts = gTTS(text=word, lang=lang)
    audio_file = f"audio/{word}_{lang}.mp3"
    tts.save(audio_file)
    return audio_file

app.py

import tkinter as tk
from db import init_db, add_word, search_word
from tts import generate_audio
import os
from playsound import playsound


init_db()


def submit_word():
    word = entry_word.get()
    lang = entry_lang.get()
    meaning = entry_meaning.get()
    synonyms = entry_synonyms.get()
    audio_path = generate_audio(word, lang)
    add_word(word, lang, meaning, synonyms, audio_path)
    label_status.config(text="✅ Word added!")


def play_audio():
    word = entry_word.get()
    result = search_word(word)
    # result[5] is the audio_file column of the dictionary table
    if result and os.path.exists(result[5]):
        playsound(result[5])
    else:
        label_status.config(text="❌ Audio not found.")


# GUI Setup (each entry uses its placeholder text as the initial value)
window = tk.Tk()
window.title("📚 Custom Dictionary Builder")
window.geometry("400x400")

entry_word = tk.Entry(window)
entry_word.insert(0, "Word")
entry_word.pack(pady=5)

entry_lang = tk.Entry(window)
entry_lang.insert(0, "Language Code (e.g., en, es)")
entry_lang.pack(pady=5)

entry_meaning = tk.Entry(window)
entry_meaning.insert(0, "Meaning")
entry_meaning.pack(pady=5)

entry_synonyms = tk.Entry(window)
entry_synonyms.insert(0, "Synonyms")
entry_synonyms.pack(pady=5)

tk.Button(window, text="Add Word", command=submit_word).pack(pady=10)
tk.Button(window, text="Play Pronunciation", command=play_audio).pack(pady=5)

label_status = tk.Label(window, text="")
label_status.pack(pady=10)

window.mainloop()



Language Codes for gTTS:

  • English: en
  • Hindi: hi
  • Spanish: es
  • French: fr
  • Malayalam: ml
  • Tamil: ta
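
For example, calling the helper above with one of these codes (a quick sketch; gTTS needs an internet connection to synthesize speech):

from tts import generate_audio

# Generate a Spanish pronunciation; the file is written under audio/
path = generate_audio("hola", lang="es")
print(path)  # audio/hola_es.mp3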

Mindfulness & Focus Timer App

main.py

import tkinter as tk
from timer import start_pomodoro
from breathing import start_breathing
from prompts import get_prompt


window = tk.Tk()
window.title("🧠 Mindfulness & Focus Timer")
window.geometry("400x300")

label = tk.Label(window, text="Welcome to Focus Time!", font=("Helvetica", 16))
label.pack(pady=10)

tk.Button(window, text="Start Pomodoro", command=start_pomodoro).pack(pady=10)
tk.Button(window, text="Breathing Exercise", command=start_breathing).pack(pady=10)
tk.Button(window, text="Get Mindfulness Prompt", command=lambda: label.config(text=get_prompt())).pack(pady=10)

window.mainloop()

timer.py

import time
import pygame
import threading

def play_sound():
    pygame.init()
    pygame.mixer.init()
    pygame.mixer.music.load("sounds/ding.wav")
    pygame.mixer.music.play()

def start_pomodoro():
    def run():
        print("Focus time started!")
        # Count down 25 minutes, printing MM:SS to the console
        for i in range(25 * 60, 0, -1):
            mins, secs = divmod(i, 60)
            print(f"{mins:02d}:{secs:02d}", end='\r')
            time.sleep(1)
        play_sound()
        print("\nTime for a break!")

    # Daemon thread so the countdown doesn't keep the app alive after the window closes
    threading.Thread(target=run, daemon=True).start()
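
The countdown above prints to the console. A GUI-friendly variant (a sketch; the window and label arguments are assumed to come from main.py and are not part of the original code) schedules updates on the Tk event loop with after() instead of using a thread:

def start_pomodoro_gui(window, label, total_seconds=25 * 60):
    # Re-schedules itself every second on the Tk event loop, so no extra thread is needed
    def tick(remaining):
        mins, secs = divmod(remaining, 60)
        label.config(text=f"{mins:02d}:{secs:02d}")
        if remaining > 0:
            window.after(1000, tick, remaining - 1)
        else:
            play_sound()
            label.config(text="Time for a break!")

    tick(total_seconds)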

breathing.py

import time
import threading

def start_breathing():
    def run():
        print("Follow the breathing pattern:")
        for _ in range(4):
            print("Inhale... 🫁")
            time.sleep(4)
            print("Hold... ✋")
            time.sleep(4)
            print("Exhale... 😮‍💨")
            time.sleep(4)
    threading.Thread(target=run).start()

prompts.py

import random

prompts = [
    "Breathe in clarity, breathe out stress.",
    "Focus on one thing at a time.",
    "You are in control of your day.",
    "Be here now. 🌱",
    "Let go of what you can't control."
]

def get_prompt():
    return random.choice(prompts)


AI Image Caption Generator

import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model, load_model
import pickle
import streamlit as st
from PIL import Image


# Load Pretrained InceptionV3 Model for Image Feature Extraction
# (drop the classification head and keep the pooled feature vector)
base_model = InceptionV3(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.layers[-2].output)


# Load Pretrained Captioning Model
captioning_model = load_model("image_captioning_model.h5")


# Load Tokenizer & Word Mappings
with open("tokenizer.pickle", "rb") as handle:
    tokenizer = pickle.load(handle)


max_length = 35  # Max caption length


# Extract Features from Image
def extract_features(img_path):
    img = image.load_img(img_path, target_size=(299, 299))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    feature_vector = model.predict(img)
    return feature_vector


# Generate Caption (greedy decoding, one word at a time)
def generate_caption(img_path):
    image_features = extract_features(img_path)
    caption = "startseq"

    for _ in range(max_length):
        # Encode the caption so far and pad it to the model's expected input length
        sequence = [tokenizer.word_index[word] for word in caption.split() if word in tokenizer.word_index]
        sequence = tf.keras.preprocessing.sequence.pad_sequences([sequence], maxlen=max_length)
        predicted_index = np.argmax(captioning_model.predict([image_features, sequence]), axis=-1)
        word = tokenizer.index_word.get(int(predicted_index[0]), "")
        if not word or word == "endseq":
            break
        caption += " " + word

    return caption.replace("startseq", "").replace("endseq", "").strip()


# Streamlit Web Interface
st.title("🖼️ AI Image Caption Generator")
uploaded_file = st.file_uploader("Upload an image...", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    img = Image.open(uploaded_file)
    st.image(img, caption="Uploaded Image", use_column_width=True)

    # Save the uploaded image temporarily (convert to RGB so PNGs with alpha can be saved as JPEG)
    img_path = "temp.jpg"
    img.convert("RGB").save(img_path)

    # Generate Caption
    with st.spinner("Generating Caption..."):
        caption_text = generate_caption(img_path)

    st.subheader("📝 Generated Caption:")
    st.write(caption_text)


AI Resume Scorer

import fitz  # PyMuPDF for PDF parsing
import docx2txt
import spacy
import re
from collections import Counter
import tkinter as tk
from tkinter import filedialog, messagebox


# Load NLP Model (English)
nlp = spacy.load("en_core_web_sm")


# Job Description (Example)
job_description = """
We are looking for a Data Scientist with expertise in Python, Machine Learning, and Data Analysis.
Candidates must have experience with Pandas, NumPy, and Scikit-learn.
Strong communication and teamwork skills are required.
"""


# Function to extract text from PDF
def extract_text_from_pdf(pdf_path):
    text = ""
    doc = fitz.open(pdf_path)
    for page in doc:
        text += page.get_text()
    return text


# Function to extract text from DOCX
def extract_text_from_docx(docx_path):
    return docx2txt.process(docx_path)


# Function to clean and preprocess text
def clean_text(text):
    text = re.sub(r"\s+", " ", text)  # Remove extra spaces
    text = text.lower()  # Convert to lowercase
    return text


# Function to extract keywords using NLP
def extract_keywords(text):
    doc = nlp(text)
    keywords = [token.text for token in doc if token.is_alpha and not token.is_stop]
    return Counter(keywords)


# Function to score the resume
def score_resume(resume_text, job_description):
    resume_keywords = extract_keywords(resume_text)
    job_keywords = extract_keywords(job_description)

    # Calculate Keyword Match Score
    matched_keywords = sum((resume_keywords & job_keywords).values())
    total_keywords = sum(job_keywords.values())
    keyword_score = (matched_keywords / total_keywords) * 100 if total_keywords else 0

    # Readability Score (Basic: Word Count / Sentence Count)
    sentence_count = len(re.findall(r"[.!?]", resume_text))
    word_count = len(resume_text.split())
    readability_score = (word_count / (sentence_count + 1)) * 2  # Simplified readability measure

    # Final Score Calculation (Weighted Average)
    final_score = (keyword_score * 0.7) + (readability_score * 0.3)
    return round(final_score, 2), keyword_score, readability_score
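
As a quick check of the keyword-match step (made-up counts): Counter & Counter keeps the minimum count of each shared word, so the score measures how much of the job description's vocabulary the resume covers.

from collections import Counter

resume = Counter({"python": 3, "pandas": 1, "teamwork": 1})
job = Counter({"python": 2, "pandas": 1, "communication": 1, "teamwork": 1})

matched = sum((resume & job).values())  # min counts: 2 + 1 + 1 = 4
total = sum(job.values())               # 5
print(round(matched / total * 100, 2))  # 80.0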


# GUI for File Upload
def upload_file():
    file_path = filedialog.askopenfilename(filetypes=[("PDF Files", "*.pdf"), ("Word Files", "*.docx")])

    if file_path:
        if file_path.endswith(".pdf"):
            resume_text = extract_text_from_pdf(file_path)
        elif file_path.endswith(".docx"):
            resume_text = extract_text_from_docx(file_path)
        else:
            messagebox.showerror("Error", "Unsupported file format!")
            return

        # Clean and score resume
        cleaned_resume = clean_text(resume_text)
        final_score, keyword_score, readability_score = score_resume(cleaned_resume, job_description)

        # Show results
        messagebox.showinfo("Resume Score", f"📄 Resume Score: {final_score}%\n\n"
                                            f"🔑 Keyword Match: {keyword_score:.2f}%\n"
                                            f"📖 Readability Score: {readability_score:.2f}%")


# GUI Setup
root = tk.Tk()
root.title("AI Resume Scorer")
root.geometry("300x200")

upload_btn = tk.Button(root, text="Upload Resume", command=upload_file, padx=10, pady=5)
upload_btn.pack(pady=20)

root.mainloop()