Motion Detection Security Recorder

"""

Motion Detection Security Recorder

---------------------------------

Usage:

    python motion_recorder.py

    python motion_recorder.py --source 0                      # default webcam

    python motion_recorder.py --source "video.mp4"

    python motion_recorder.py --source "rtsp://...."


Outputs:

    ./recordings/YYYYMMDD_HHMMSS_motion.mp4

    ./recordings/recording_log.csv  (optional)


Notes:

- Adjust MIN_AREA and SENSITIVITY for your camera / scene.

- Pre-buffer keeps recent frames so clip contains seconds before detection.

"""


import cv2

import numpy as np

import argparse

import time

from datetime import datetime

from collections import deque

import os

import csv


# -------------------------

# Configuration (tweakable)

# -------------------------

OUTPUT_DIR = "recordings"

LOG_CSV = True            # write a CSV log of recorded clips

FPS = 20                  # expected framerate for recording (adjust to your camera)

FRAME_WIDTH = 640         # resize frames for faster processing

FRAME_HEIGHT = 480

MIN_AREA = 1200           # minimum contour area to be considered motion (tweak)

SENSITIVITY = 25          # how much difference triggers motion (lower => more sensitive)

PRE_BUFFER_SECONDS = 3    # include 3 seconds before motion started

POST_RECORD_SECONDS = 4   # record N seconds after motion stops

CODEC = "mp4v"            # codec fourcc (try 'XVID' or 'avc1' if 'mp4v' not available)
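
# Sizing note (illustrative): at FPS = 20 and PRE_BUFFER_SECONDS = 3 the
# pre-buffer deque holds 20 * 3 = 60 frames, i.e. roughly three seconds of
# footage that is prepended to every clip when motion starts.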


# -------------------------

# Helpers

# -------------------------

def ensure_dir(path):

    if not os.path.exists(path):

        os.makedirs(path, exist_ok=True)


def timestamp_str():

    return datetime.now().strftime("%Y%m%d_%H%M%S")


def make_output_filename():

    return f"{timestamp_str()}_motion.mp4"


def write_log(csv_path, row):

    header = ["filename","start_time","end_time","duration_s","frames","source"]

    exists = os.path.exists(csv_path)

    with open(csv_path, "a", newline="", encoding="utf-8") as f:

        w = csv.writer(f)

        if not exists:

            w.writerow(header)

        w.writerow(row)
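
# Example of a row produced by write_log (values are purely illustrative):
#   20240101_120000_motion.mp4,2024-01-01 12:00:00,2024-01-01 12:00:14,14.00,280,0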


# -------------------------

# Motion Recorder class

# -------------------------

class MotionRecorder:

    def __init__(self, source=0, output_dir=OUTPUT_DIR):

        self.source = source

        self.output_dir = output_dir

        ensure_dir(self.output_dir)

        self.log_path = os.path.join(self.output_dir, "recording_log.csv") if LOG_CSV else None


        self.cap = cv2.VideoCapture(self.source)

        if not self.cap.isOpened():

            raise RuntimeError(f"Cannot open source: {source}")


        # If camera provides FPS, override

        native_fps = self.cap.get(cv2.CAP_PROP_FPS)

        if native_fps and native_fps > 0:

            self.fps = native_fps

        else:

            self.fps = FPS


        # Use resize dims

        self.width = FRAME_WIDTH

        self.height = FRAME_HEIGHT


        # background subtractor (more robust) + simple diff fallback

        self.bg_sub = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16, detectShadows=True)

        self.pre_buffer = deque(maxlen=int(self.fps * PRE_BUFFER_SECONDS))

        self.is_recording = False

        self.writer = None

        self.record_start_time = None

        self.frames_recorded = 0

        self.last_motion_time = None


    def release(self):

        if self.cap:

            self.cap.release()

        if self.writer:

            self.writer.release()

        cv2.destroyAllWindows()


    def start(self):

        print("Starting motion detection. Press 'q' to quit.")

        try:

            while True:

                ret, frame = self.cap.read()

                if not ret:

                    print("Stream ended or cannot fetch frame.")

                    break


                # resize for consistent processing

                frame = cv2.resize(frame, (self.width, self.height))

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                gray_blur = cv2.GaussianBlur(gray, (5,5), 0)


                # background subtraction mask

                fgmask = self.bg_sub.apply(gray_blur)

                # threshold to reduce noise

                _, thresh = cv2.threshold(fgmask, SENSITIVITY, 255, cv2.THRESH_BINARY)

                # morphological operations to reduce small noise

                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))

                clean = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)

                clean = cv2.morphologyEx(clean, cv2.MORPH_DILATE, kernel, iterations=2)


                # find contours

                contours, _ = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

                motion_detected = False

                for cnt in contours:

                    if cv2.contourArea(cnt) >= MIN_AREA:

                        motion_detected = True

                        (x,y,w,h) = cv2.boundingRect(cnt)

                        # draw rectangle for preview

                        cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)


                # push frame into pre-buffer (store color frames)

                self.pre_buffer.append(frame.copy())


                # Recording logic

                now = time.time()

                if motion_detected:

                    self.last_motion_time = now

                    if not self.is_recording:

                        # start new recording using pre-buffer

                        fname = make_output_filename()

                        out_path = os.path.join(self.output_dir, fname)

                        fourcc = cv2.VideoWriter_fourcc(*CODEC)

                        self.writer = cv2.VideoWriter(out_path, fourcc, self.fps, (self.width, self.height))

                        if not self.writer.isOpened():

                            print("Warning: VideoWriter failed to open. Check codec availability.")

                        # flush pre-buffer to writer; skip the newest entry, which is
                        # the current frame and gets written again further below

                        for bf in list(self.pre_buffer)[:-1]:

                            if self.writer:

                                self.writer.write(bf)

                        self.is_recording = True

                        self.record_start_time = datetime.now()

                        self.frames_recorded = len(self.pre_buffer) - 1

                        self.current_out_path = out_path

                        print(f"[{self.record_start_time}] Motion started -> Recording to {out_path}")


                # If recording, write current frame and manage stop condition

                if self.is_recording:

                    if self.writer:

                        self.writer.write(frame)

                    self.frames_recorded += 1

                    # stop if no motion for POST_RECORD_SECONDS

                    if self.last_motion_time and (now - self.last_motion_time) > POST_RECORD_SECONDS:

                        # finalize

                        record_end = datetime.now()

                        duration = (record_end - self.record_start_time).total_seconds()

                        print(f"[{record_end}] Motion ended. Duration: {duration:.2f}s, Frames: {self.frames_recorded}")

                        # close writer

                        if self.writer:

                            self.writer.release()

                            self.writer = None

                        # write log

                        if LOG_CSV and self.log_path:

                            write_log(self.log_path, [

                                os.path.basename(self.current_out_path),

                                self.record_start_time.strftime("%Y-%m-%d %H:%M:%S"),

                                record_end.strftime("%Y-%m-%d %H:%M:%S"),

                                f"{duration:.2f}",

                                str(self.frames_recorded),

                                str(self.source)

                            ])

                        self.is_recording = False

                        self.frames_recorded = 0

                        # clear pre_buffer so next record begins clean

                        self.pre_buffer.clear()


                # Show simple preview window (optional)

                preview = frame.copy()

                status_text = "REC" if self.is_recording else "Idle"

                cv2.putText(preview, f"Status: {status_text}", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,0,255) if self.is_recording else (0,255,0), 2)

                cv2.imshow("Motion Recorder - Preview", preview)


                # keyboard quit

                key = cv2.waitKey(1) & 0xFF

                if key == ord('q'):

                    print("Quit requested by user.")

                    break


        except KeyboardInterrupt:

            print("Interrupted by user.")

        finally:

            # cleanup

            if self.writer:

                self.writer.release()

                if LOG_CSV and self.log_path:

                    # if we were recording when interrupted, log end

                    end_time = datetime.now()

                    duration = (end_time - self.record_start_time).total_seconds() if self.record_start_time else 0

                    write_log(self.log_path, [

                        os.path.basename(self.current_out_path),

                        self.record_start_time.strftime("%Y-%m-%d %H:%M:%S") if self.record_start_time else "",

                        end_time.strftime("%Y-%m-%d %H:%M:%S"),

                        f"{duration:.2f}",

                        str(self.frames_recorded),

                        str(self.source)

                    ])

            self.release()

            print("Released resources. Exiting.")


# -------------------------

# CLI arg parsing

# -------------------------

def parse_args():

    parser = argparse.ArgumentParser(description="Motion Detection Security Recorder")

    parser.add_argument("--source", type=str, default="0", help="Video source: 0 (webcam), file path, or RTSP URL")

    parser.add_argument("--out", type=str, default=OUTPUT_DIR, help="Output recordings folder")

    parser.add_argument("--fps", type=int, default=FPS, help="Recording FPS fallback")

    parser.add_argument("--w", type=int, default=FRAME_WIDTH, help="Frame width (resize)")

    parser.add_argument("--h", type=int, default=FRAME_HEIGHT, help="Frame height (resize)")

    parser.add_argument("--min-area", type=int, default=MIN_AREA, help="Min contour area to detect motion")

    parser.add_argument("--sensitivity", type=int, default=SENSITIVITY, help="Threshold sensitivity for mask")

    args = parser.parse_args()

    return args


# -------------------------

# Entrypoint

# -------------------------

def main():

    args = parse_args()

    source = args.source

    # convert "0" -> 0 for webcam

    if source.isdigit():

        source = int(source)

    global FPS, FRAME_WIDTH, FRAME_HEIGHT, MIN_AREA, SENSITIVITY, OUTPUT_DIR

    FPS = args.fps

    FRAME_WIDTH = args.w

    FRAME_HEIGHT = args.h

    MIN_AREA = args.min_area

    SENSITIVITY = args.sensitivity

    OUTPUT_DIR = args.out


    ensure_dir(OUTPUT_DIR)

    recorder = MotionRecorder(source=source, output_dir=OUTPUT_DIR)

    recorder.start()


if __name__ == "__main__":

    main()


Reverse Image Search (Local Only)

import os

import cv2

import numpy as np

import pickle

from PIL import Image, ImageTk

import tkinter as tk

from tkinter import ttk, filedialog, messagebox

from pathlib import Path

from math import ceil


# ----------------------

# Config

# ----------------------

SUPPORTED_EXT = (".jpg", ".jpeg", ".png", ".bmp", ".tiff")

CACHE_FILE = "image_features_cache.pkl"  # optional cache to speed up indexing

THUMB_SIZE = (200, 150)  # thumbnail size for display


# ----------------------

# Feature detector factory (SIFT preferred, fallback to ORB)

# ----------------------

def make_feature_detector():

    try:

        # try SIFT (requires opencv-contrib)

        sift = cv2.SIFT_create()

        print("Using SIFT detector")

        return ("SIFT", sift)

    except Exception:

        # fallback to ORB

        orb = cv2.ORB_create(nfeatures=1500)

        print("SIFT not available — falling back to ORB")

        return ("ORB", orb)


# ----------------------

# Matcher factory

# ----------------------

def make_matcher(detector_name):

    if detector_name == "SIFT":

        # FLANN parameters for SIFT (float descriptors)

        FLANN_INDEX_KDTREE = 1

        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)

        search_params = dict(checks=50)

        matcher = cv2.FlannBasedMatcher(index_params, search_params)

        return matcher

    else:

        # ORB uses Hamming distance (binary descriptors)

        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

        return matcher


# ----------------------

# Compute descriptors for one image

# ----------------------

def compute_descriptors(img_path, detector_tuple):

    """

    Returns: keypoints, descriptors

    """

    detector_name, detector = detector_tuple

    img = cv2.imdecode(np.fromfile(str(img_path), dtype=np.uint8), cv2.IMREAD_GRAYSCALE)

    if img is None:

        raise ValueError(f"Failed to read image: {img_path}")

    # optional resize to speed up (keep aspect)

    h, w = img.shape

    max_dim = 1024

    if max(h, w) > max_dim:

        scale = max_dim / max(h, w)

        img = cv2.resize(img, (int(w*scale), int(h*scale)), interpolation=cv2.INTER_AREA)


    kp, des = detector.detectAndCompute(img, None)

    if des is None:

        # no descriptors found — return empty

        des = np.array([], dtype=np.float32).reshape(0, 128 if detector_name == "SIFT" else 32)

    return kp, des
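
# Usage sketch (hypothetical path):
#   kp, des = compute_descriptors(Path("photos/cat.jpg"), make_feature_detector())
#   SIFT descriptors have 128 columns, ORB descriptors have 32.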


# ----------------------

# Indexing folder of images

# ----------------------

def index_folder(folder_path, detector_tuple, cache_enabled=True):

    """

    Scans folder for images, computes descriptors, returns list of records:

    [ {"path": Path(...), "kp": keypoints, "des": descriptors} ... ]

    """

    folder = Path(folder_path)

    if not folder.exists() or not folder.is_dir():

        raise ValueError("Folder path invalid")


    # Try load cache (only if detector matches)

    cache = {}

    if cache_enabled and os.path.exists(CACHE_FILE):

        try:

            with open(CACHE_FILE, "rb") as f:

                cache = pickle.load(f)

        except Exception:

            cache = {}


    records = []

    for p in sorted(folder.iterdir()):

        if p.suffix.lower() not in SUPPORTED_EXT:

            continue

        key = str(p.resolve())

        # cached item must match detector name to be reused

        cached = cache.get(key)

        use_cache = cached and cached.get("detector") == detector_tuple[0]

        if use_cache:

            rec = {"path": Path(key), "kp": None, "des": cached["descriptors"]}

            print("Cache hit:", p.name)

        else:

            try:

                kp, des = compute_descriptors(p, detector_tuple)

            except Exception as e:

                print("Failed to compute:", p, e)

                kp, des = [], np.array([])

            rec = {"path": p, "kp": kp, "des": des}

            # store to cache

            cache[key] = {"detector": detector_tuple[0], "descriptors": des}

        records.append(rec)


    if cache_enabled:

        try:

            with open(CACHE_FILE, "wb") as f:

                pickle.dump(cache, f)

        except Exception as e:

            print("Could not write cache:", e)


    return records


# ----------------------

# Matching logic

# ----------------------

def match_descriptors(query_des, target_des, matcher, detector_name, ratio_thresh=0.75):

    """

    Returns number of good matches using Lowe ratio test for k=2 neighbors.

    For ORB (binary), ratio test still works with BFMatcher and knn.

    """

    if query_des is None or target_des is None or len(query_des) == 0 or len(target_des) == 0:

        return 0, []


    # For FLANN with SIFT, descriptors must be float32

    if detector_name == "SIFT":

        if query_des.dtype != np.float32:

            query_des = query_des.astype(np.float32)

        if target_des.dtype != np.float32:

            target_des = target_des.astype(np.float32)


    try:

        matches = matcher.knnMatch(query_des, target_des, k=2)

    except Exception:

        # fallback: use BFMatcher with crossCheck off

        bf = cv2.BFMatcher()

        raw = bf.match(query_des, target_des)

        # treat each as good match (not optimal)

        good = raw

        return len(good), good


    # Apply ratio test

    good = []

    for m_n in matches:

        if len(m_n) < 2:

            continue

        m, n = m_n

        if m.distance < ratio_thresh * n.distance:

            good.append(m)

    return len(good), good
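
# Ratio-test intuition (illustrative numbers): a query descriptor whose best
# match lies at distance 50 and second-best at 120 is kept (50 < 0.75 * 120),
# while a near-tie such as 50 vs 55 is discarded as ambiguous.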


# ----------------------

# Query: find top K similar

# ----------------------

def find_similar(query_path, records, detector_tuple, top_k=6):

    detector_name, _ = detector_tuple

    matcher = make_matcher(detector_name)


    # compute descriptors for query

    qkp, qdes = compute_descriptors(query_path, detector_tuple)

    results = []

    for rec in records:

        tdes = rec["des"]

        count, good_matches = match_descriptors(qdes, tdes, matcher, detector_name)

        # normalized score: matches / sqrt(size_query * size_target) to penalize huge images

        denom = max(1.0, np.sqrt(max(1,len(qdes)) * max(1,len(tdes))))

        score = count / denom

        results.append({"path": rec["path"], "matches": count, "score": score, "good": good_matches})

    # sort by score descending

    results = sorted(results, key=lambda r: r["score"], reverse=True)

    return results[:top_k]
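
# Scoring example (illustrative): 40 good matches between a query with 400
# descriptors and a target with 900 descriptors score 40 / sqrt(400 * 900)
# = 40 / 600 ≈ 0.067, so feature-rich images are not unfairly favored.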


# ----------------------

# Utilities: load thumbnail for Tkinter display

# ----------------------

def pil_image_from_path(p):

    # handle non-ascii paths by reading bytes then PIL

    arr = np.fromfile(str(p), dtype=np.uint8)

    img = cv2.imdecode(arr, cv2.IMREAD_COLOR)

    if img is None:

        return None

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    pil = Image.fromarray(img)

    return pil


# ----------------------

# Tkinter GUI

# ----------------------

class ReverseImageSearchGUI:

    def __init__(self, master):

        self.master = master

        master.title("Reverse Image Search — Local (SIFT/ORB)")

        master.geometry("1000x700")


        self.detector_tuple = make_feature_detector()

        self.records = []

        self.indexed_folder = None


        # Top controls

        top = ttk.Frame(master)

        top.pack(side=tk.TOP, fill=tk.X, padx=8, pady=8)


        ttk.Button(top, text="Choose Images Folder", command=self.choose_folder).pack(side=tk.LEFT, padx=4)

        self.folder_label = ttk.Label(top, text="No folder chosen")

        self.folder_label.pack(side=tk.LEFT, padx=6)

        ttk.Button(top, text="Index Folder", command=self.index_folder).pack(side=tk.LEFT, padx=6)

        ttk.Button(top, text="Choose Query Image", command=self.choose_query).pack(side=tk.LEFT, padx=6)

        ttk.Label(top, text="Top K:").pack(side=tk.LEFT, padx=(10,0))

        self.topk_var = tk.IntVar(value=6)

        ttk.Entry(top, textvariable=self.topk_var, width=4).pack(side=tk.LEFT)


        # Query preview + results area

        mid = ttk.Frame(master)

        mid.pack(fill=tk.BOTH, expand=True, padx=8, pady=6)


        left = ttk.Frame(mid, width=300)

        left.pack(side=tk.LEFT, fill=tk.Y)

        ttk.Label(left, text="Query Image:").pack(anchor="w")

        self.query_canvas = tk.Label(left, text="No query selected", width=40, height=12, bg="#ddd")

        self.query_canvas.pack(padx=6, pady=6)


        ttk.Button(left, text="Clear Cache", command=self.clear_cache).pack(pady=6)

        ttk.Button(left, text="Re-index", command=self.reindex).pack(pady=6)


        right = ttk.Frame(mid)

        right.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True)

        ttk.Label(right, text="Top Matches:").pack(anchor="w")

        self.results_frame = ttk.Frame(right)

        self.results_frame.pack(fill=tk.BOTH, expand=True)


        # status

        self.status_var = tk.StringVar(value="Ready")

        ttk.Label(master, textvariable=self.status_var).pack(side=tk.BOTTOM, fill=tk.X)


    def choose_folder(self):

        folder = filedialog.askdirectory()

        if not folder:

            return

        self.indexed_folder = folder

        self.folder_label.config(text=folder)

        self.status_var.set(f"Selected folder: {folder}")


    def index_folder(self):

        if not self.indexed_folder:

            messagebox.showwarning("Pick folder", "Choose images folder first")

            return

        self.status_var.set("Indexing folder (computing descriptors)...")

        self.master.update_idletasks()

        try:

            self.records = index_folder(self.indexed_folder, self.detector_tuple, cache_enabled=True)

            self.status_var.set(f"Indexed {len(self.records)} images")

            messagebox.showinfo("Indexed", f"Indexed {len(self.records)} images.")

        except Exception as e:

            messagebox.showerror("Indexing failed", str(e))

            self.status_var.set("Indexing failed")


    def reindex(self):

        if not self.indexed_folder:

            messagebox.showwarning("Pick folder", "Choose images folder first")

            return

        # delete cache and re-index

        try:

            if os.path.exists(CACHE_FILE):

                os.remove(CACHE_FILE)

        except OSError:

            pass

        self.index_folder()


    def choose_query(self):

        q = filedialog.askopenfilename(filetypes=[("Images", "*.jpg *.jpeg *.png *.bmp *.tiff")])

        if not q:

            return

        self.query_path = Path(q)

        pil = pil_image_from_path(q)

        if pil is None:

            messagebox.showerror("Error", "Could not load image")

            return

        thumb = pil.copy()

        thumb.thumbnail(THUMB_SIZE)

        tkimg = ImageTk.PhotoImage(thumb)

        self.query_canvas.image = tkimg

        self.query_canvas.config(image=tkimg, text="")

        self.status_var.set(f"Query: {os.path.basename(q)}")

        # Run search if indexed

        if not self.records:

            if messagebox.askyesno("Not indexed", "Folder not indexed yet. Index now?"):

                self.index_folder()

            else:

                return

        self.search_query(q)


    def search_query(self, qpath):

        self.status_var.set("Searching for similar images...")

        self.master.update_idletasks()

        try:

            topk = max(1, int(self.topk_var.get()))

        except (ValueError, tk.TclError):

            topk = 6

        results = find_similar(qpath, self.records, self.detector_tuple, top_k=topk)

        # Clear previous results

        for w in self.results_frame.winfo_children():

            w.destroy()


        # Display results in grid

        cols = min(3, topk)

        r, c = 0, 0

        for idx, res in enumerate(results):

            path = res["path"]

            score = res["score"]

            matches = res["matches"]

            pil = pil_image_from_path(path)

            if pil is None:

                continue

            thumb = pil.copy()

            thumb.thumbnail(THUMB_SIZE)

            tkimg = ImageTk.PhotoImage(thumb)

            panel = ttk.Frame(self.results_frame, relief=tk.RIDGE, borderwidth=1)

            panel.grid(row=r, column=c, padx=6, pady=6, sticky="nsew")

            lbl = tk.Label(panel, image=tkimg)

            lbl.image = tkimg

            lbl.pack()

            info = ttk.Label(panel, text=f"{path.name}\nScore:{score:.3f}\nMatches:{matches}", anchor="center")

            info.pack()

            # click to open full image in default viewer

            def make_open(p=path):

                return lambda e=None: os.startfile(str(p)) if os.name == 'nt' else os.system(f'xdg-open "{p}"')

            lbl.bind("<Button-1>", make_open(path))

            c += 1

            if c >= cols:

                c = 0

                r += 1


        self.status_var.set("Search complete")


    def clear_cache(self):

        if os.path.exists(CACHE_FILE):

            try:

                os.remove(CACHE_FILE)

                messagebox.showinfo("Cache", "Cache file removed")

                self.status_var.set("Cache cleared")

            except Exception as e:

                messagebox.showerror("Error", f"Could not remove cache: {e}")

        else:

            messagebox.showinfo("Cache", "No cache file present")


# ----------------------

# Run

# ----------------------

def main():

    root = tk.Tk()

    app = ReverseImageSearchGUI(root)

    root.mainloop()


if __name__ == "__main__":

    main()


Smart Typing Speed Trainer

import tkinter as tk

from tkinter import ttk, messagebox, simpledialog

import time

import random

import json

import os


STATS_FILE = "typing_stats.json"


SAMPLE_PARAGRAPHS = {

    "Easy": [

        "The quick brown fox jumps over the lazy dog.",

        "Practice makes perfect. Keep typing every day.",

        "Sunrise over the hills gave a golden glow."

    ],

    "Medium": [

        "Learning to type faster requires consistent short sessions and focused practice.",

        "Productivity improves when repetitive tasks are automated and simplified.",

        "A small daily habit can compound into significant improvement over time."

    ],

    "Hard": [

        "Synthesis of knowledge emerges when creativity meets disciplined experimentation and reflection.",

        "Contributing to open-source projects accelerates learning through real-world code review and collaboration.",

        "Concurrency issues often surface under load where assumptions about ordering and state break down."

    ]

}


# -----------------------

# Utilities: stats file

# -----------------------

def load_stats():

    if not os.path.exists(STATS_FILE):

        return []

    try:

        with open(STATS_FILE, "r", encoding="utf-8") as f:

            return json.load(f)

    except Exception:

        return []


def save_stats(stats):

    with open(STATS_FILE, "w", encoding="utf-8") as f:

        json.dump(stats, f, indent=2)
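
# Each saved entry is a dict shaped like the record built in save_result
# (values here are illustrative):
#   {"name": "Guest", "difficulty": "Easy", "wpm": 42.3, "accuracy": 96.1,
#    "errors": 4, "chars_typed": 210, "time_seconds": 59.4, "paragraph": "..."}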


# -----------------------

# Typing logic helpers

# -----------------------

def calc_wpm(char_count, elapsed_seconds):

    # Standard WPM uses 5 characters per word

    minutes = elapsed_seconds / 60.0 if elapsed_seconds > 0 else 1/60.0

    return (char_count / 5.0) / minutes
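
# Example: 300 characters typed in 90 seconds -> (300 / 5) words / 1.5 minutes = 40 WPM.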


def calc_accuracy(correct_chars, total_typed):

    if total_typed == 0:

        return 0.0

    return (correct_chars / total_typed) * 100.0


# -----------------------

# Main App

# -----------------------

class TypingTrainerApp:

    def __init__(self, root):

        self.root = root

        self.root.title("Smart Typing Speed Trainer")

        self.root.geometry("900x600")


        self.difficulty = tk.StringVar(value="Easy")

        self.username = tk.StringVar(value="Guest")

        self.paragraph = ""

        self.start_time = None

        self.end_time = None

        self.running = False

        self.correct_chars = 0

        self.total_typed = 0

        self.errors = 0


        self.stats = load_stats()


        self.build_ui()

        self.new_paragraph()


    def build_ui(self):

        top = ttk.Frame(self.root)

        top.pack(side=tk.TOP, fill=tk.X, padx=8, pady=8)


        ttk.Label(top, text="Name:").pack(side=tk.LEFT)

        name_entry = ttk.Entry(top, textvariable=self.username, width=18)

        name_entry.pack(side=tk.LEFT, padx=6)


        ttk.Label(top, text="Difficulty:").pack(side=tk.LEFT, padx=(12,0))

        diff_menu = ttk.OptionMenu(top, self.difficulty, self.difficulty.get(), *SAMPLE_PARAGRAPHS.keys())

        diff_menu.pack(side=tk.LEFT, padx=6)


        ttk.Button(top, text="Next Paragraph", command=self.new_paragraph).pack(side=tk.LEFT, padx=6)

        ttk.Button(top, text="Restart", command=self.restart).pack(side=tk.LEFT, padx=6)

        ttk.Button(top, text="Save Result", command=self.save_result).pack(side=tk.LEFT, padx=6)

        ttk.Button(top, text="Leaderboard", command=self.show_leaderboard).pack(side=tk.LEFT, padx=6)

        ttk.Button(top, text="Clear Stats", command=self.clear_stats).pack(side=tk.RIGHT, padx=6)


        # Paragraph display

        paragraph_frame = ttk.LabelFrame(self.root, text="Type the text below")

        paragraph_frame.pack(fill=tk.BOTH, expand=False, padx=10, pady=8)


        self.paragraph_text = tk.Text(paragraph_frame, height=6, wrap="word", font=("Consolas", 14), padx=8, pady=8, state="disabled")

        self.paragraph_text.pack(fill=tk.BOTH, expand=True)


        # Typing area

        typing_frame = ttk.LabelFrame(self.root, text="Your typing")

        typing_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=8)


        self.typing_text = tk.Text(typing_frame, height=10, wrap="word", font=("Consolas", 14), padx=8, pady=8)

        self.typing_text.pack(fill=tk.BOTH, expand=True)

        self.typing_text.bind("<Key>", self.on_key_press)

        # Disable paste

        self.typing_text.bind("<<Paste>>", lambda e: "break")

        self.typing_text.bind("<Control-v>", lambda e: "break")

        self.typing_text.bind("<Button-2>", lambda e: "break")


        # Stats

        stats_frame = ttk.Frame(self.root)

        stats_frame.pack(fill=tk.X, padx=10, pady=6)


        self.wpm_var = tk.StringVar(value="WPM: 0.0")

        self.acc_var = tk.StringVar(value="Accuracy: 0.0%")

        self.err_var = tk.StringVar(value="Errors: 0")

        self.time_var = tk.StringVar(value="Time: 0.0s")


        ttk.Label(stats_frame, textvariable=self.wpm_var, font=("Arial", 12)).pack(side=tk.LEFT, padx=8)

        ttk.Label(stats_frame, textvariable=self.acc_var, font=("Arial", 12)).pack(side=tk.LEFT, padx=8)

        ttk.Label(stats_frame, textvariable=self.err_var, font=("Arial", 12)).pack(side=tk.LEFT, padx=8)

        ttk.Label(stats_frame, textvariable=self.time_var, font=("Arial", 12)).pack(side=tk.LEFT, padx=8)


        # Progress / highlight correct vs incorrect

        lower = ttk.Frame(self.root)

        lower.pack(fill=tk.BOTH, expand=False, padx=10, pady=6)

        ttk.Label(lower, text="Tips: Start typing to begin the timer. Pasting is disabled.").pack(side=tk.LEFT)


    def new_paragraph(self):

        self.pause()

        diff = self.difficulty.get()

        pool = SAMPLE_PARAGRAPHS.get(diff, SAMPLE_PARAGRAPHS["Easy"])

        self.paragraph = random.choice(pool)

        # show paragraph readonly

        self.paragraph_text.config(state="normal")

        self.paragraph_text.delete("1.0", tk.END)

        self.paragraph_text.insert(tk.END, self.paragraph)

        self.paragraph_text.config(state="disabled")

        self.restart()


    def restart(self):

        self.pause()

        self.typing_text.delete("1.0", tk.END)

        self.start_time = None

        self.end_time = None

        self.running = False

        self.correct_chars = 0

        self.total_typed = 0

        self.errors = 0

        self.update_stats_display(final=False)

        self.typing_text.focus_set()


    def pause(self):

        self.running = False


    def on_key_press(self, event):

        # ignore non-printing keys that don't change text (Shift, Ctrl, Alt)

        if event.keysym in ("Shift_L","Shift_R","Control_L","Control_R","Alt_L","Alt_R","Caps_Lock","Tab","Escape"):

            return

        # start timer on first real key

        if not self.running:

            self.start_time = time.time()

            self.running = True

            # schedule first update

            self.root.after(100, self.periodic_update)


        # schedule update immediately after Tk has applied the key to the widget

        self.root.after(1, self.evaluate_typing)


    def evaluate_typing(self):

        typed = self.typing_text.get("1.0", tk.END)[:-1]  # drop trailing newline Tk adds

        target = self.paragraph


        # compute per-character correctness up to typed length

        total = len(typed)

        correct = 0

        errors = 0


        for i, ch in enumerate(typed):

            if i < len(target) and ch == target[i]:

                correct += 1

            else:

                errors += 1


        # characters typed beyond the target length are already counted as errors
        # by the loop above, so no extra adjustment is needed here


        self.correct_chars = correct

        self.total_typed = total

        self.errors = errors


        # if user finished (typed length equals paragraph length), stop timer

        if total >= len(target):

            # check final correctness

            if correct == len(target):

                self.end_time = time.time()

                self.running = False

                self.update_stats_display(final=True)

                messagebox.showinfo("Completed", f"Well done, {self.username.get()}!\nYou finished the paragraph.")

                return


        # otherwise update live stats

        self.update_stats_display(final=False)


    def periodic_update(self):

        if self.running:

            self.update_stats_display(final=False)

            self.root.after(200, self.periodic_update)


    def update_stats_display(self, final=False):

        elapsed = (self.end_time - self.start_time) if (self.start_time and self.end_time) else (time.time() - self.start_time if self.start_time else 0.0)

        wpm = calc_wpm(self.total_typed, elapsed) if self.total_typed > 0 else 0.0

        acc = calc_accuracy(self.correct_chars, self.total_typed) if self.total_typed > 0 else 0.0


        self.wpm_var.set(f"WPM: {wpm:.1f}")

        self.acc_var.set(f"Accuracy: {acc:.1f}%")

        self.err_var.set(f"Errors: {self.errors}")

        self.time_var.set(f"Time: {elapsed:.1f}s")


        if final and self.start_time:

            # show final summary and auto-save to stats (ask username)

            pass


    def save_result(self):

        if not self.start_time:

            messagebox.showwarning("No attempt", "Start typing first to record a result.")

            return

        # If still running, finalize end time

        if self.running:

            self.end_time = time.time()

            self.running = False

            self.evaluate_typing()


        elapsed = (self.end_time - self.start_time) if (self.start_time and self.end_time) else 0.0

        wpm = calc_wpm(self.total_typed, elapsed) if elapsed > 0 else 0.0

        acc = calc_accuracy(self.correct_chars, self.total_typed) if self.total_typed > 0 else 0.0


        name = self.username.get().strip() or "Guest"

        record = {

            "name": name,

            "difficulty": self.difficulty.get(),

            "wpm": round(wpm, 1),

            "accuracy": round(acc, 1),

            "errors": int(self.errors),

            "chars_typed": int(self.total_typed),

            "time_seconds": round(elapsed, 1),

            "paragraph": self.paragraph

        }

        self.stats.append(record)

        # keep only latest 200

        self.stats = self.stats[-200:]

        save_stats(self.stats)

        messagebox.showinfo("Saved", f"Result saved for {name}.\nWPM={record['wpm']}, Accuracy={record['accuracy']}%")

        # refresh leaderboard

        self.show_leaderboard()


    def show_leaderboard(self):

        lb_win = tk.Toplevel(self.root)

        lb_win.title("Leaderboard / Recent Attempts")

        lb_win.geometry("700x400")

        frame = ttk.Frame(lb_win)

        frame.pack(fill=tk.BOTH, expand=True, padx=8, pady=8)


        cols = ("name", "difficulty", "wpm", "accuracy", "errors", "time_seconds")

        tree = ttk.Treeview(frame, columns=cols, show="headings")

        for c in cols:

            tree.heading(c, text=c.capitalize())

            tree.column(c, width=100, anchor="center")

        tree.pack(fill=tk.BOTH, expand=True)


        # sort by wpm desc

        sorted_stats = sorted(self.stats, key=lambda r: r.get("wpm", 0), reverse=True)

        for rec in sorted_stats:

            tree.insert("", "end", values=(rec["name"], rec["difficulty"], rec["wpm"], rec["accuracy"], rec["errors"], rec["time_seconds"]))


        btn_frame = ttk.Frame(lb_win)

        btn_frame.pack(fill=tk.X, pady=6)

        ttk.Button(btn_frame, text="Close", command=lb_win.destroy).pack(side=tk.RIGHT, padx=6)


    def clear_stats(self):

        if messagebox.askyesno("Confirm", "Clear all saved stats? This cannot be undone."):

            self.stats = []

            save_stats(self.stats)

            messagebox.showinfo("Cleared", "All saved stats removed.")


# -----------------------

# Run

# -----------------------

def main():

    root = tk.Tk()

    app = TypingTrainerApp(root)

    root.mainloop()


if __name__ == "__main__":

    main()


Image Style Transfer

"""

Image Style Transfer demo (TensorFlow Hub + Tkinter UI)


- Pick a content image (photo) and a style image (painting).

- Apply Magenta arbitrary-image-stylization-v1-256 model.

- Preview and save the stylized result.


Requirements:

    pip install tensorflow tensorflow-hub pillow opencv-python

"""


import tkinter as tk

from tkinter import ttk, filedialog, messagebox

from PIL import Image, ImageTk

import numpy as np

import tensorflow as tf

import tensorflow_hub as hub

import cv2

import os


# Model URL

TFHUB_MODEL_URL = "https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2"


# --------------------------

# Helper functions

# --------------------------

def load_img(path, max_dim=512):

    """Load image with PIL, resize keeping aspect ratio so long side = max_dim."""

    img = Image.open(path).convert("RGB")

    # resize

    long = max(img.size)

    if long > max_dim:

        scale = max_dim / long

        new_size = (int(img.size[0]*scale), int(img.size[1]*scale))

        img = img.resize(new_size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter

    return img


def img_to_tensor(img):

    """PIL Image -> float32 tensor shape [1, H, W, 3] in [0,1]."""

    arr = np.array(img).astype(np.float32) / 255.0

    # add batch dim

    return tf.expand_dims(arr, axis=0)


def tensor_to_pil(tensor):

    """Tensor [1,H,W,3] in [0,1] -> PIL Image."""

    arr = tensor[0].numpy()

    arr = np.clip(arr * 255.0, 0, 255).astype(np.uint8)

    return Image.fromarray(arr)


# --------------------------

# Load model once

# --------------------------

print("Loading style transfer model from TensorFlow Hub...")

model = hub.load(TFHUB_MODEL_URL)

print("Model loaded.")
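
# Note: hub.load downloads the model on first run and reuses a local cache
# afterwards; the cache location can be overridden via the TFHUB_CACHE_DIR
# environment variable.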


# --------------------------

# Simple CLI function (optional)

# --------------------------

def stylize_image(content_img_path, style_img_path, output_path="stylized.png", max_dim=512):

    content_img = load_img(content_img_path, max_dim=max_dim)

    style_img = load_img(style_img_path, max_dim=max_dim)

    content_t = img_to_tensor(content_img)

    style_t = img_to_tensor(style_img)

    # The hub model expects float tensors in [0,1]

    outputs = model(tf.constant(content_t), tf.constant(style_t))

    stylized = outputs[0]

    pil = tensor_to_pil(stylized)

    pil.save(output_path)

    print(f"Saved stylized image to {output_path}")

    return output_path
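
# Usage sketch (hypothetical file names):
#   stylize_image("beach_photo.jpg", "starry_night.jpg", output_path="beach_stylized.png")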


# --------------------------

# Tkinter GUI

# --------------------------

class StyleTransferGUI:

    def __init__(self, root):

        self.root = root

        root.title("AI Image Style Transfer — RootRace")

        root.geometry("1000x700")


        self.content_path = None

        self.style_path = None

        self.result_image = None  # PIL Image


        # Controls frame

        ctrl = ttk.Frame(root)

        ctrl.pack(side=tk.TOP, fill=tk.X, padx=8, pady=8)


        ttk.Button(ctrl, text="Choose Content Image", command=self.choose_content).pack(side=tk.LEFT, padx=4)

        ttk.Button(ctrl, text="Choose Style Image", command=self.choose_style).pack(side=tk.LEFT, padx=4)

        ttk.Button(ctrl, text="Apply Style Transfer", command=self.apply_style).pack(side=tk.LEFT, padx=8)

        ttk.Button(ctrl, text="Save Result", command=self.save_result).pack(side=tk.LEFT, padx=4)

        ttk.Button(ctrl, text="Quit", command=root.quit).pack(side=tk.RIGHT, padx=4)


        # Preview frames

        preview = ttk.Frame(root)

        preview.pack(fill=tk.BOTH, expand=True)


        # Left: content & style thumbnails

        left = ttk.Frame(preview)

        left.pack(side=tk.LEFT, fill=tk.Y, padx=6, pady=6)


        ttk.Label(left, text="Content Image").pack()

        self.content_label = ttk.Label(left)

        self.content_label.pack(padx=6, pady=6)


        ttk.Label(left, text="Style Image").pack()

        self.style_label = ttk.Label(left)

        self.style_label.pack(padx=6, pady=6)


        # Right: result canvas

        right = ttk.Frame(preview)

        right.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True, padx=6, pady=6)

        ttk.Label(right, text="Result").pack()

        self.canvas = tk.Canvas(right, bg="black")

        self.canvas.pack(fill=tk.BOTH, expand=True)


        # Status

        self.status = ttk.Label(root, text="Load a content and style image to start.", relief=tk.SUNKEN, anchor="w")

        self.status.pack(side=tk.BOTTOM, fill=tk.X)


    def choose_content(self):

        path = filedialog.askopenfilename(filetypes=[("Images","*.jpg *.jpeg *.png *.bmp"),("All files","*.*")])

        if not path:

            return

        self.content_path = path

        img = load_img(path, max_dim=400)

        self._set_thumbnail(self.content_label, img)

        self.status.config(text=f"Selected content: {os.path.basename(path)}")


    def choose_style(self):

        path = filedialog.askopenfilename(filetypes=[("Images","*.jpg *.jpeg *.png *.bmp"),("All files","*.*")])

        if not path:

            return

        self.style_path = path

        img = load_img(path, max_dim=200)

        self._set_thumbnail(self.style_label, img)

        self.status.config(text=f"Selected style: {os.path.basename(path)}")


    def _set_thumbnail(self, widget, pil_img):

        tk_img = ImageTk.PhotoImage(pil_img)

        widget.image = tk_img  # keep ref

        widget.config(image=tk_img)


    def apply_style(self):

        if not self.content_path or not self.style_path:

            messagebox.showwarning("Missing images", "Please choose both content and style images.")

            return

        try:

            self.status.config(text="Running style transfer... (this may take a few seconds)")

            self.root.update_idletasks()


            content_img = load_img(self.content_path, max_dim=512)

            style_img = load_img(self.style_path, max_dim=512)

            content_t = img_to_tensor(content_img)

            style_t = img_to_tensor(style_img)


            outputs = model(tf.constant(content_t), tf.constant(style_t))

            stylized = outputs[0]  # [1,H,W,3]

            pil = tensor_to_pil(stylized)

            self.result_image = pil


            # display result on canvas, scaled to fit

            self._display_result_on_canvas(pil)

            self.status.config(text="Done. You can save the result.")

        except Exception as e:

            messagebox.showerror("Error", f"Style transfer failed: {e}")

            self.status.config(text="Error during style transfer")


    def _display_result_on_canvas(self, pil_img):

        # resize to fit canvas while preserving aspect ratio

        cw = self.canvas.winfo_width() or 600

        ch = self.canvas.winfo_height() or 400

        w,h = pil_img.size

        scale = min(cw/w, ch/h, 1.0)

        new_size = (int(w*scale), int(h*scale))

        disp = pil_img.resize(new_size, Image.LANCZOS)

        tk_img = ImageTk.PhotoImage(disp)

        self.canvas.image = tk_img

        self.canvas.delete("all")

        # center image

        x = (cw - new_size[0]) // 2

        y = (ch - new_size[1]) // 2

        self.canvas.create_image(x, y, anchor="nw", image=tk_img)


    def save_result(self):

        if self.result_image is None:

            messagebox.showwarning("No result", "No stylized image to save. Apply style transfer first.")

            return

        path = filedialog.asksaveasfilename(defaultextension=".png", filetypes=[("PNG","*.png"),("JPEG","*.jpg")])

        if not path:

            return

        self.result_image.save(path)

        messagebox.showinfo("Saved", f"Saved stylized image to {path}")

        self.status.config(text=f"Saved: {path}")


# --------------------------

# CLI usage entrypoint

# --------------------------

def main():

    root = tk.Tk()

    app = StyleTransferGUI(root)

    root.mainloop()


if __name__ == "__main__":

    main()

Audio Frequency Spectrum Visualizer

"""

Audio Frequency Spectrum Visualizer (Tkinter + matplotlib)


- Select a WAV file

- Shows waveform (top) and animated FFT spectrum bars (bottom)

- Works with mono or stereo WAV files

- Uses scipy.io.wavfile to read WAV

- Animation scrolls through the audio buffer, computing FFT per frame to simulate live visualization


Limitations:

- Only WAV supported out of the box. For MP3, use pydub to convert to raw samples (instructions below).

- No audio playback included by default (keeps dependencies minimal). See comments for how to add playback.

"""


import tkinter as tk

from tkinter import ttk, filedialog, messagebox

import numpy as np

import matplotlib

matplotlib.use("TkAgg")

import matplotlib.pyplot as plt

from matplotlib.animation import FuncAnimation

from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

from scipy.io import wavfile

import os


# -----------------------

# Visualization settings

# -----------------------

FRAME_SIZE = 2048        # samples per frame for FFT (power of two recommended)

HOP_SIZE = 1024          # step between frames (overlap)

SPECTRUM_BINS = 80       # number of bars to display

SAMPLE_REDUCE = 1        # reduce sampling rate by factor (keep 1 unless memory issue)


# -----------------------

# Helper utilities

# -----------------------

def load_wav_file(path):

    """Read WAV file and return (rate, samples as float32 mono)."""

    rate, data = wavfile.read(path)

    # normalize and convert to float32 in range [-1,1]

    if data.dtype == np.int16:

        data = data.astype(np.float32) / 32768.0

    elif data.dtype == np.int32:

        data = data.astype(np.float32) / 2147483648.0

    elif data.dtype == np.uint8:

        data = (data.astype(np.float32) - 128) / 128.0

    else:

        data = data.astype(np.float32)


    # if stereo, convert to mono by averaging channels

    if data.ndim == 2:

        data = data.mean(axis=1)


    # optionally downsample (by integer factor)

    if SAMPLE_REDUCE > 1:

        data = data[::SAMPLE_REDUCE]

        rate = rate // SAMPLE_REDUCE


    return rate, data
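
# MP3 support sketch (the conversion path mentioned in the module docstring;
# assumes pydub and ffmpeg are installed, and is not wired into the GUI):
#   from pydub import AudioSegment
#   seg = AudioSegment.from_mp3("song.mp3")
#   samples = np.array(seg.get_array_of_samples(), dtype=np.float32)
#   samples /= float(1 << (8 * seg.sample_width - 1))  # scale ints to [-1, 1]
#   if seg.channels == 2:
#       samples = samples.reshape(-1, 2).mean(axis=1)   # downmix to mono
#   rate = seg.frame_rate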


def compute_spectrum(frame, rate, n_bins=SPECTRUM_BINS):

    """

    Compute FFT amplitude spectrum for a frame (1D array).

    Returns frequency centers and amplitudes (log-scaled).

    """

    # apply a window to reduce spectral leakage

    window = np.hanning(len(frame))

    frame_windowed = frame * window

    # FFT

    fft = np.fft.rfft(frame_windowed, n=FRAME_SIZE)

    mags = np.abs(fft)

    # convert to dB scale

    mags_db = 20 * np.log10(mags + 1e-6)

    freqs = np.fft.rfftfreq(FRAME_SIZE, d=1.0 / rate)

    # reduce to n_bins by averaging contiguous bands (log spaced bins could be better)

    # we'll use linear bins for simplicity

    bins = np.array_split(np.arange(len(freqs)), n_bins)

    bfreqs = []

    bampl = []

    for b in bins:

        if len(b) == 0:

            bfreqs.append(0)

            bampl.append(-120.0)

            continue

        bfreqs.append(freqs[b].mean())

        bampl.append(mags_db[b].mean())

    # normalize amplitudes to 0..1 for plotting bars

    a = np.array(bampl)

    a = (a - a.min()) / (np.maximum(a.max() - a.min(), 1e-6))

    return np.array(bfreqs), a
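
# Resolution note (illustrative): with FRAME_SIZE = 2048 and a 44.1 kHz file,
# rfft yields 1025 bins spaced 44100 / 2048 ≈ 21.5 Hz apart, which are then
# averaged down to SPECTRUM_BINS bars for display.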


# -----------------------

# Main App

# -----------------------

class SpectrumVisualizerApp:

    def __init__(self, root):

        self.root = root

        self.root.title("Audio Frequency Spectrum Visualizer")

        self.root.geometry("1000x650")


        # Top controls

        ctrl = ttk.Frame(root)

        ctrl.pack(side=tk.TOP, fill=tk.X, padx=8, pady=6)


        self.path_var = tk.StringVar()

        ttk.Label(ctrl, text="Audio File:").pack(side=tk.LEFT)

        self.path_entry = ttk.Entry(ctrl, textvariable=self.path_var, width=60)

        self.path_entry.pack(side=tk.LEFT, padx=6)

        ttk.Button(ctrl, text="Browse", command=self.browse_file).pack(side=tk.LEFT, padx=4)

        ttk.Button(ctrl, text="Load", command=self.load_file).pack(side=tk.LEFT, padx=4)

        ttk.Button(ctrl, text="Start", command=self.start).pack(side=tk.LEFT, padx=4)

        ttk.Button(ctrl, text="Pause/Resume", command=self.toggle_pause).pack(side=tk.LEFT, padx=4)

        ttk.Button(ctrl, text="Stop", command=self.stop).pack(side=tk.LEFT, padx=4)


        # Status

        self.status_var = tk.StringVar(value="No file loaded")

        ttk.Label(root, textvariable=self.status_var).pack(side=tk.TOP, anchor="w", padx=8)


        # Matplotlib figure with two subplots (waveform + spectrum)

        self.fig, (self.ax_wave, self.ax_spec) = plt.subplots(2, 1, figsize=(9, 6), gridspec_kw={'height_ratios':[1,1]})

        plt.tight_layout(pad=3.0)


        self.canvas = FigureCanvasTkAgg(self.fig, master=root)

        self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)


        # controls / internal state

        self.sr = None

        self.samples = None

        self.num_frames = 0

        self.current_frame_idx = 0

        self.paused = True

        self.ani = None


        # waveform plot placeholders

        self.wave_x = None

        self.wave_plot = None

        # spectrum bars

        self.bar_container = None

        self.freq_centers = None


        # initialize plots

        self.setup_empty_plots()


    def setup_empty_plots(self):

        self.ax_wave.clear()

        self.ax_wave.set_title("Waveform (time domain)")

        self.ax_wave.set_xlabel("Time (s)")

        self.ax_wave.set_ylabel("Amplitude")

        self.ax_spec.clear()

        self.ax_spec.set_title("Frequency Spectrum (animated)")

        self.ax_spec.set_xlabel("Frequency (Hz)")

        self.ax_spec.set_ylabel("Normalized amplitude")

        self.canvas.draw_idle()


    def browse_file(self):

        p = filedialog.askopenfilename(filetypes=[("WAV files", "*.wav"), ("All files", "*.*")])

        if p:

            self.path_var.set(p)


    def load_file(self):

        path = self.path_var.get().strip()

        if not path or not os.path.exists(path):

            messagebox.showerror("Error", "Please choose a valid WAV file.")

            return

        try:

            self.sr, self.samples = load_wav_file(path)

        except Exception as e:

            messagebox.showerror("Error", f"Failed to read WAV: {e}")

            return


        # compute frames count

        self.num_frames = max(1, (len(self.samples) - FRAME_SIZE) // HOP_SIZE + 1)

        self.current_frame_idx = 0

        self.status_var.set(f"Loaded: {os.path.basename(path)} | SR={self.sr} Hz | Samples={len(self.samples)} | Frames={self.num_frames}")


        # draw full waveform

        t = np.arange(len(self.samples)) / float(self.sr)

        self.ax_wave.clear()

        self.ax_wave.plot(t, self.samples, color='gray', linewidth=0.5)

        self.ax_wave.set_xlim(t.min(), t.max())

        self.ax_wave.set_ylim(-1.0, 1.0)

        self.ax_wave.set_title("Waveform (time domain)")

        self.ax_wave.set_xlabel("Time (s)")

        self.ax_wave.set_ylabel("Amplitude")


        # prepare spectrum bar placeholders using first frame

        frame0 = self.samples[:FRAME_SIZE]

        freqs, amps = compute_spectrum(frame0, self.sr, n_bins=SPECTRUM_BINS)

        self.freq_centers = freqs

        x = np.arange(len(amps))

        self.ax_spec.clear()

        self.bar_container = self.ax_spec.bar(x, amps, align='center', alpha=0.8)

        # show frequency labels at a few ticks

        tick_idx = np.linspace(0, len(freqs)-1, min(10, len(freqs))).astype(int)

        tick_labels = [f"{int(freqs[i])}Hz" for i in tick_idx]

        self.ax_spec.set_xticks(tick_idx)

        self.ax_spec.set_xticklabels(tick_labels, rotation=45)

        self.ax_spec.set_ylim(0, 1.02)

        self.ax_spec.set_title("Frequency Spectrum (animated)")


        # vertical line on waveform to show current frame

        self.wave_marker = self.ax_wave.axvline(0, color='red', linewidth=1.0)


        self.canvas.draw_idle()


    def start(self):

        if self.samples is None:

            messagebox.showwarning("No file", "Load a WAV file first.")

            return

        if self.ani:

            # reset animation

            self.ani.event_source.stop()

            self.ani = None

        self.paused = False

        # Create animation that updates every ~30 ms

        self.ani = FuncAnimation(self.fig, self.update_frame, interval=30, blit=False)

        self.canvas.draw_idle()

        self.status_var.set("Playing visualization... (simulation)")


    def toggle_pause(self):

        if self.ani is None:

            return

        if self.paused:

            self.paused = False

            self.status_var.set("Resumed")

        else:

            self.paused = True

            self.status_var.set("Paused")


    def stop(self):

        if self.ani:

            self.ani.event_source.stop()

            self.ani = None

        self.current_frame_idx = 0

        self.paused = True

        self.status_var.set("Stopped")

        # reset waveform marker

        if hasattr(self, 'wave_marker') and self.wave_marker:

            self.wave_marker.set_xdata([0, 0])  # axvline stores a pair of x coordinates

        self.canvas.draw_idle()


    def update_frame(self, *args):

        if self.samples is None or self.paused:

            return


        # compute frame start/end

        start = int(self.current_frame_idx * HOP_SIZE)

        end = start + FRAME_SIZE

        if end > len(self.samples):

            # loop to start for continuous visual demo

            self.current_frame_idx = 0

            start = 0

            end = FRAME_SIZE


        frame = self.samples[start:end]

        # update waveform marker

        t_pos = (start + FRAME_SIZE/2) / float(self.sr)

        self.wave_marker.set_xdata([t_pos, t_pos])


        # compute spectrum

        freqs, amps = compute_spectrum(frame, self.sr, n_bins=SPECTRUM_BINS)


        # update bar heights and colors (color mapped to amplitude)

        for rect, h in zip(self.bar_container, amps):

            rect.set_height(h)

            rect.set_color(plt.cm.viridis(h))


        self.current_frame_idx += 1

        if self.current_frame_idx >= self.num_frames:

            self.current_frame_idx = 0  # loop


        self.canvas.draw_idle()


# -----------------------

# Run the app

# -----------------------

def main():

    root = tk.Tk()

    app = SpectrumVisualizerApp(root)

    root.mainloop()


if __name__ == "__main__":

    main()