Braille Translator Tool

import tkinter as tk

from tkinter import filedialog, messagebox

from PIL import Image, ImageDraw, ImageTk



# -------------------------

# Braille mapping utilities

# -------------------------


# Dot-number definitions for letters a..z (Grade-1 braille)

LETTER_DOTS = {

    'a': [1],

    'b': [1,2],

    'c': [1,4],

    'd': [1,4,5],

    'e': [1,5],

    'f': [1,2,4],

    'g': [1,2,4,5],

    'h': [1,2,5],

    'i': [2,4],

    'j': [2,4,5],

    'k': [1,3],

    'l': [1,2,3],

    'm': [1,3,4],

    'n': [1,3,4,5],

    'o': [1,3,5],

    'p': [1,2,3,4],

    'q': [1,2,3,4,5],

    'r': [1,2,3,5],

    's': [2,3,4],

    't': [2,3,4,5],

    'u': [1,3,6],

    'v': [1,2,3,6],

    'w': [2,4,5,6],

    'x': [1,3,4,6],

    'y': [1,3,4,5,6],

    'z': [1,3,5,6],

}


# Common punctuation (Grade-1)

PUNCT_DOTS = {

    ',': [2],

    ';': [2,3],

    ':': [2,5],

    '.': [2,5,6],

    '?': [2,3,6],

    '!': [2,3,5],

    '(': [2,3,5,6],  # open parenthesis commonly encoded as ⠶ (implementations vary)

    ')': [2,3,5,6],  # mirrored forms exist; same cell as '(' for simplicity

    "'": [3],

    '-': [3,6],

    '/': [3,4],

    '"': [5,6,2,3],  # approximate

    '@': [4,1],      # uncommon; approximate

    '#': [3,4,5,6],  # number sign (we will use official number sign below)

}


# Braille special signs

NUMBER_SIGN = [3,4,5,6]   # ⠼

CAPITAL_SIGN = [6]        # prefix for capital (single capital) — optional use

SPACE = []                # no dots for space -> unicode U+2800


# Build dot -> Unicode mapping utility

def dots_to_braille_unicode(dots):

    """

    dots: list of integers 1..8 (we use 1..6)

    returns: single unicode braille character

    """

    code = 0x2800

    for d in dots:

        if 1 <= d <= 8:

            code += 1 << (d - 1)

    return chr(code)


# Precompute maps

LETTER_TO_BRAILLE = {ch: dots_to_braille_unicode(dots) for ch, dots in LETTER_DOTS.items()}

PUNCT_TO_BRAILLE = {p: dots_to_braille_unicode(dots) for p, dots in PUNCT_DOTS.items()}

NUMBER_SIGN_CHAR = dots_to_braille_unicode(NUMBER_SIGN)

CAPITAL_SIGN_CHAR = dots_to_braille_unicode(CAPITAL_SIGN)

SPACE_CHAR = chr(0x2800)
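
# Quick sanity check of the bit math above (dot n sets bit n-1 of the

# offset from U+2800), assuming the tables as defined:

assert LETTER_TO_BRAILLE['c'] == '\u2809'  # dots 1,4 -> ⠉

assert NUMBER_SIGN_CHAR == '\u283c'        # dots 3,4,5,6 -> ⠼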


# Digit mapping in Grade-1: the number sign followed by letters a-j encodes 1-0

DIGIT_TO_LETTER = {

    '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e',

    '6': 'f', '7': 'g', '8': 'h', '9': 'i', '0': 'j'

}


# -------------------------

# Translation function

# -------------------------

def translate_to_braille(text, use_capital_prefix=True, use_number_prefix=True):

    """

    Translate plain text into Grade-1 Braille Unicode string.

    Options:

      - use_capital_prefix: if True, prefix capitals with the capital sign (⠠)

      - use_number_prefix: if True, prefix digit sequences with number sign (⠼)

    Returns braille_unicode_string

    """

    out = []

    i = 0

    n = len(text)

    while i < n:

        ch = text[i]

        if ch.isspace():

            out.append(SPACE_CHAR)

            i += 1

            continue


        # Digit sequence handling

        if ch.isdigit():

            if use_number_prefix:

                out.append(NUMBER_SIGN_CHAR)

            # consume contiguous digits

            while i < n and text[i].isdigit():

                d = text[i]

                letter_equiv = DIGIT_TO_LETTER.get(d, None)

                if letter_equiv:

                    out.append(LETTER_TO_BRAILLE[letter_equiv])

                else:

                    # fallback: space for unknown

                    out.append(SPACE_CHAR)

                i += 1

            continue


        # Letter

        if ch.isalpha():

            if ch.isupper():

                if use_capital_prefix:

                    out.append(CAPITAL_SIGN_CHAR)

                ch_low = ch.lower()

            else:

                ch_low = ch

            code = LETTER_TO_BRAILLE.get(ch_low)

            if code:

                out.append(code)

            else:

                out.append(SPACE_CHAR)

            i += 1

            continue


        # Punctuation

        if ch in PUNCT_TO_BRAILLE:

            out.append(PUNCT_TO_BRAILLE[ch])

            i += 1

            continue


        # Unknown character: attempt to include as space placeholder

        out.append(SPACE_CHAR)

        i += 1


    return "".join(out)
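
# Example (a minimal sketch) of the prefixes in action:

#   translate_to_braille("Hi 5")  ->  "⠠⠓⠊⠀⠼⠑"

#   i.e. capital sign, h, i, braille blank, number sign, e-read-as-5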


# -------------------------

# Braille image rendering

# -------------------------

def render_braille_image(braille_text, dot_radius=8, dot_gap=10, cell_gap=16, bg_color=(255,255,255)):

    """

    Render braille_text (unicode braille characters) into a PIL Image.

    Each braille cell is 2 (columns) x 3 (rows) of dots.

    We read the Unicode braille codepoints and draw filled circles for active dots.

    Returns PIL.Image (RGB).

    """

    # Wrap to a maximum number of cells per line to keep the image width reasonable

    max_cols = 40  # characters per row, adjust if needed


    # Split into lines by breaking long strings

    chars = list(braille_text)

    lines = [chars[i:i+max_cols] for i in range(0, len(chars), max_cols)]


    # cell size

    cell_w = dot_radius*2 + dot_gap

    cell_h = dot_radius*3 + dot_gap*2  # 3 rows

    img_w = len(lines[0]) * (cell_w + cell_gap) + 2*cell_gap if lines else 200

    img_h = len(lines) * (cell_h + cell_gap) + 2*cell_gap if lines else 100


    img = Image.new("RGB", (img_w, img_h), color=bg_color)

    draw = ImageDraw.Draw(img)


    for row_idx, line in enumerate(lines):

        for col_idx, ch in enumerate(line):

            x0 = cell_gap + col_idx * (cell_w + cell_gap)

            y0 = cell_gap + row_idx * (cell_h + cell_gap)

            # Determine dot pattern from unicode char

            codepoint = ord(ch)

            base = 0x2800

            mask = codepoint - base

            # dot positions for 1..6 are arranged:

            # (col0,row0)=dot1  (col1,row0)=dot4

            # (col0,row1)=dot2  (col1,row1)=dot5

            # (col0,row2)=dot3  (col1,row2)=dot6

            dot_positions = [

                (0,0,1),  # dot1

                (0,1,2),  # dot2

                (0,2,3),  # dot3

                (1,0,4),  # dot4

                (1,1,5),  # dot5

                (1,2,6),  # dot6

            ]

            for col, r, dotn in dot_positions:

                bit = (mask >> (dotn-1)) & 1

                cx = x0 + col * (dot_radius + dot_gap/2) + dot_radius + 4

                cy = y0 + r * (dot_radius + dot_gap/2) + dot_radius + 4

                bbox = [cx - dot_radius, cy - dot_radius, cx + dot_radius, cy + dot_radius]

                if bit:

                    draw.ellipse(bbox, fill=(0,0,0))

                else:

                    # draw faint circle to indicate empty dot (optional)

                    draw.ellipse(bbox, outline=(200,200,200))

    return img
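
# Headless usage sketch (no GUI; "hello.png" is just an example path):

#   braille = translate_to_braille("Hello, World!")

#   render_braille_image(braille).save("hello.png")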


# -------------------------

# GUI

# -------------------------

class BrailleGUI:

    def __init__(self, root):

        self.root = root

        root.title("Braille Translator Tool — Grade-1 (Uncontracted)")

        root.geometry("820x520")


        # Input frame

        frame_in = tk.LabelFrame(root, text="Input Text", padx=8, pady=8)

        frame_in.pack(fill="both", padx=12, pady=8)


        self.text_input = tk.Text(frame_in, height=6, wrap="word", font=("Arial", 12))

        self.text_input.pack(fill="both", expand=True)

        self.text_input.insert("1.0", "Hello, World! 123")


        # Controls

        ctrl = tk.Frame(root)

        ctrl.pack(fill="x", padx=12)

        tk.Button(ctrl, text="Translate", command=self.on_translate).pack(side="left", padx=6, pady=6)

        tk.Button(ctrl, text="Render Braille Image (Preview)", command=self.on_render_preview).pack(side="left", padx=6, pady=6)

        tk.Button(ctrl, text="Save Braille Image...", command=self.on_save_image).pack(side="left", padx=6, pady=6)

        tk.Button(ctrl, text="Copy Braille Unicode to Clipboard", command=self.on_copy_clipboard).pack(side="left", padx=6, pady=6)


        # Output frame (braille unicode text)

        frame_out = tk.LabelFrame(root, text="Braille (Unicode)", padx=8, pady=8)

        frame_out.pack(fill="both", padx=12, pady=8, expand=True)


        self.braille_text_widget = tk.Text(frame_out, height=6, wrap="word", font=("Segoe UI Symbol", 20))

        self.braille_text_widget.pack(fill="both", expand=True)

        self.braille_text_widget.config(state="disabled")


        # Image preview area

        preview_frame = tk.LabelFrame(root, text="Image Preview", padx=8, pady=8)

        preview_frame.pack(fill="both", padx=12, pady=8)

        self.preview_label = tk.Label(preview_frame)

        self.preview_label.pack()

        self.last_preview_image = None  # keep reference to avoid GC


    def on_translate(self):

        txt = self.text_input.get("1.0", "end").rstrip("\n")

        if not txt.strip():

            messagebox.showwarning("Input required", "Please enter some text to translate.")

            return

        braille = translate_to_braille(txt)

        self.braille_text_widget.config(state="normal")

        self.braille_text_widget.delete("1.0", "end")

        self.braille_text_widget.insert("1.0", braille)

        self.braille_text_widget.config(state="disabled")


    def on_render_preview(self):

        braille = self.braille_text_widget.get("1.0", "end").rstrip("\n")

        if not braille:

            messagebox.showinfo("No Braille", "Translate text first (click Translate).")

            return

        img = render_braille_image(braille, dot_radius=8, dot_gap=10, cell_gap=14)

        self.show_preview(img)


    def on_save_image(self):

        braille = self.braille_text_widget.get("1.0", "end").rstrip("\n")

        if not braille:

            messagebox.showinfo("No Braille", "Translate text first (click Translate).")

            return

        img = render_braille_image(braille, dot_radius=10, dot_gap=12, cell_gap=16)

        path = filedialog.asksaveasfilename(defaultextension=".png", filetypes=[("PNG image","*.png")], title="Save Braille image")

        if path:

            img.save(path)

            messagebox.showinfo("Saved", f"Braille image saved to:\n{path}")


    def on_copy_clipboard(self):

        braille = self.braille_text_widget.get("1.0", "end").rstrip("\n")

        if not braille:

            messagebox.showinfo("No Braille", "Translate text first (click Translate).")

            return

        # Use Tk clipboard

        self.root.clipboard_clear()

        self.root.clipboard_append(braille)

        messagebox.showinfo("Copied", "Braille Unicode copied to clipboard.")


    def show_preview(self, pil_img):

        # Resize preview if too big

        max_w, max_h = 760, 240

        w, h = pil_img.size

        scale = min(max_w / w, max_h / h, 1.0)

        if scale < 1.0:

            pil_img = pil_img.resize((int(w*scale), int(h*scale)), Image.LANCZOS)

        tk_img = ImageTk.PhotoImage(pil_img)

        self.preview_label.config(image=tk_img)

        self.preview_label.image = tk_img  # keep ref


# -------------------------

# Run the app

# -------------------------

def main():

    root = tk.Tk()

    app = BrailleGUI(root)

    root.mainloop()


if __name__ == "__main__":

    main()


Music Sheet to Audio Converter

 """

sheet_to_midi.py


Simple prototype: Convert a scanned single-line, monophonic staff in TREBLE CLEF

to a MIDI file using OpenCV -> heuristic notehead detection -> music21.


Limitations:

 - Monophonic, printed notation, single staff detection.

 - Treats each notehead as a quarter note by default.

 - No clef/key/time signature detection (assumes treble clef, 4/4).

 - Not a replacement for full OMR systems like Audiveris.


Usage:

    python sheet_to_midi.py input_image.png output.mid

"""


import sys

import cv2

import numpy as np

from music21 import stream, note, midi, tempo, meter, pitch


# -------------------------

# Utility & image helpers

# -------------------------

def load_image(path):

    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    if img is None:

        raise FileNotFoundError(f"Cannot open image: {path}")

    return img


def binarize(img):

    # Adaptive threshold - robust to lighting

    th = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,

                               cv2.THRESH_BINARY_INV, 15, 10)

    return th


# -------------------------

# Staff line detection

# -------------------------

def detect_staff_lines(binary_img, debug=False):

    """

    Detect horizontal staff lines using morphological operations and Hough or projection.

    Returns list of y-positions of detected lines (sorted).

    """

    h, w = binary_img.shape


    # Use horizontal morphological kernel to enhance staff lines

    horizontal_size = max(10, w // 30)

    horiz_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))

    hor = cv2.morphologyEx(binary_img, cv2.MORPH_OPEN, horiz_kernel)


    # Sum across columns to get projection

    proj = np.sum(hor, axis=1)

    # Normalize

    proj = (proj - proj.min()) / (proj.max() - proj.min() + 1e-9)


    # Find peaks in projection where staff lines are

    thresh = 0.15  # tunable

    candidates = np.where(proj > thresh)[0]


    if len(candidates) == 0:

        return []


    # Group contiguous regions into single lines (cluster by gaps)

    lines = []

    current = [candidates[0]]

    for r in candidates[1:]:

        if r - current[-1] <= 2:

            current.append(r)

        else:

            # average

            lines.append(int(np.mean(current)))

            current = [r]

    if current:

        lines.append(int(np.mean(current)))


    # Staffs are sets of 5 lines close to each other. Find clusters of 5 lines

    # For simplicity, find any groups of 5 lines with roughly equal spacing

    # If more than 5 lines are present (multiple staves), return the first 5-line group

    if len(lines) < 5:

        return lines  # fallback


    # sliding window of size 5, measure spacing variance

    best_group = None

    best_score = 1e9

    for i in range(0, len(lines) - 4):

        group = lines[i:i+5]

        spacings = np.diff(group)

        score = np.var(spacings)  # we want equal spacings

        if score < best_score:

            best_score = score

            best_group = group


    if best_group is None:

        return lines[:5]

    return best_group
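
# Quick synthetic check (a sketch): five 2-px lines, 10 px apart, should come

# back at roughly their y-positions.

#   synthetic = np.full((200, 400), 255, dtype=np.uint8)

#   for y in (50, 60, 70, 80, 90):

#       cv2.line(synthetic, (0, y), (399, y), 0, 2)

#   print(detect_staff_lines(binarize(synthetic)))  # ~[50, 60, 70, 80, 90]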


# -------------------------

# Note head detection

# -------------------------

def detect_noteheads(binary_img, staff_lines, debug=False):

    """

    Detect connected components that look like noteheads.

    Return list of bounding boxes (x, y, w, h).

    """

    # Remove staff lines from binary image to avoid splitting noteheads:

    img_nolines = binary_img.copy()

    # Create a mask of lines using morphological ops similar to detection

    h, w = binary_img.shape

    horizontal_size = max(10, w // 30)

    horiz_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))

    hor = cv2.morphologyEx(binary_img, cv2.MORPH_OPEN, horiz_kernel)

    img_nolines = cv2.bitwise_and(img_nolines, cv2.bitwise_not(hor))


    # Morph close small gaps to make noteheads full blobs

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))

    img_nolines = cv2.morphologyEx(img_nolines, cv2.MORPH_CLOSE, kernel, iterations=1)


    # Find contours

    contours, _ = cv2.findContours(img_nolines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)


    boxes = []

    for cnt in contours:

        x, y, wbox, hbox = cv2.boundingRect(cnt)

        area = cv2.contourArea(cnt)

        # heuristics for notehead sizes

        if area < 30:  # too small noise

            continue

        # discard very tall/thin objects (likely stems or flags)

        if hbox > 3 * wbox and hbox > 40:

            continue

        # also discard huge regions (like staff text)

        if wbox > binary_img.shape[1] * 0.6:

            continue

        boxes.append((x, y, wbox, hbox))


    # Sort left-to-right

    boxes.sort(key=lambda b: b[0])

    return boxes
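
# Continuing the synthetic staff above: a small filled blob between two lines

# should survive line removal and come back as a single box near x ≈ 145.

#   cv2.circle(synthetic, (150, 65), 5, 0, -1)   # fake notehead

#   bin_img = binarize(synthetic)

#   print(detect_noteheads(bin_img, detect_staff_lines(bin_img)))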


# -------------------------

# Map vertical position to pitch (treble clef)

# -------------------------

def map_y_to_pitch(y_center, staff_lines):

    """

    Given y-coordinate and list of 5 staff line y-positions (top->bottom),

    compute the pitch name using treble clef mapping.

    We'll map lines & spaces to steps; middle C is one ledger line below staff in treble clef.

    The mapping: from top line down:

      Line 1 (top) -> F5

      Space -> E5

      Line 2 -> D5

      ...

    We'll build a scale of positions (lines and spaces) with corresponding MIDI note numbers.

    """

    # Convert staff_lines sorted top->bottom

    lines = sorted(staff_lines)

    # staff spacing

    spacing = np.median(np.diff(lines))

    # Build reference position centers: adjacent line/space positions are half

    # a staff-space apart, extended well above and below for ledger positions.

    half = spacing / 2.0

    pos_centers = [lines[0] - 6*half + i*half for i in range(40)]

    # Index of the center that coincides with the top staff line

    idx_top_line = int(round((lines[0] - pos_centers[0]) / half))

    # Treble clef: the top line is F5 (MIDI 77)

    top_midi = pitch.Pitch('F5').midi  # 77

    # Simplification: treat each adjacent position as one semitone below the

    # previous. Real staff positions are diatonic (1 or 2 semitones apart), so

    # this chromatic mapping drifts for notes far from the top line; it keeps

    # the prototype simple at the cost of pitch accuracy.

    midi_for_pos = []

    for i in range(len(pos_centers)):

        midi_for_pos.append(top_midi - (i - idx_top_line))


    # Identify closest pos index for given y_center

    diffs = [abs(y_center - c) for c in pos_centers]

    pos_idx = int(np.argmin(diffs))

    midi = int(round(midi_for_pos[pos_idx]))

    # Convert midi to note name

    p = pitch.Pitch()

    p.midi = midi

    return p.nameWithOctave
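
# Illustration of the chromatic simplification: with staff lines at

# y = 50, 60, 70, 80, 90, the top line maps to F5 as intended, but the

# bottom line comes back as A4 (8 semitones below F5), where a real treble

# staff has E4. Good enough for a prototype, not faithful transcription.

#   lines = [50, 60, 70, 80, 90]

#   map_y_to_pitch(50, lines)   # 'F5'

#   map_y_to_pitch(90, lines)   # 'A4' under this mapping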


# -------------------------

# Build music21 stream from detected notes

# -------------------------

def build_stream_from_boxes(boxes, staff_lines, tempo_bpm=100):

    s = stream.Stream()

    s.append(tempo.MetronomeMark(number=tempo_bpm))

    # Simple 4/4 time signature

    s.append(meter.TimeSignature('4/4'))


    # For each bounding box left->right, map to pitch and create quarter notes

    # More advanced: group boxes near same x to chords, or detect stems to find durations (not implemented)

    for (x, y, wbox, hbox) in boxes:

        cx = x + wbox / 2.0

        cy = y + hbox / 2.0

        pitch_name = map_y_to_pitch(cy, staff_lines)

        n = note.Note(pitch_name)

        n.duration.quarterLength = 1.0  # quarter note default

        s.append(n)

    return s


# -------------------------

# Main flow

# -------------------------

def process_image_to_midi(input_path, output_midi_path, debug=False):

    img = load_image(input_path)

    bin_img = binarize(img)


    staff_lines = detect_staff_lines(bin_img, debug=debug)

    if not staff_lines or len(staff_lines) < 5:

        print("Warning: could not detect 5 staff lines reliably. Trying to proceed with available lines.")

    else:

        print("Detected staff lines (y-coordinates):", staff_lines)


    boxes = detect_noteheads(bin_img, staff_lines, debug=debug)

    if not boxes:

        print("No noteheads detected. Exiting.")

        return False


    print(f"Detected {len(boxes)} candidate noteheads (left→right).")

    for i, b in enumerate(boxes, start=1):

        x, y, wbox, hbox = b

        print(f"{i}: x={x}, y={y}, w={wbox}, h={hbox}")


    music_stream = build_stream_from_boxes(boxes, staff_lines, tempo_bpm=100)


    # Export to MIDI

    mf = midi.translate.streamToMidiFile(music_stream)

    mf.open(output_midi_path, 'wb')

    mf.write()

    mf.close()

    print(f"MIDI saved to {output_midi_path}")

    return True


# -------------------------

# CLI

# -------------------------

if __name__ == "__main__":

    if len(sys.argv) < 3:

        print("Usage: python sheet_to_midi.py input_image.png output.mid")

        sys.exit(1)

    inp = sys.argv[1]

    out = sys.argv[2]

    ok = process_image_to_midi(inp, out, debug=True)

    if not ok:

        sys.exit(2)


AI-powered Meme Generator

 """

AI-powered Meme Generator

- Pick a local image or image URL

- Generate captions with OpenAI (optional)

- Render meme-style text (top/bottom) on the image and save


Dependencies:

  pip install openai pillow requests python-dotenv

"""


import os

import requests

from io import BytesIO

from typing import List, Optional


from PIL import Image, ImageDraw, ImageFont


# Optional: OpenAI

try:

    import openai

    OPENAI_OK = True

except Exception:

    OPENAI_OK = False


# Optional .env loader

try:

    from dotenv import load_dotenv

    load_dotenv()

except Exception:

    pass


# If OPENAI_API_KEY in env, set key for openai package

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

if OPENAI_API_KEY and OPENAI_OK:

    openai.api_key = OPENAI_API_KEY


# ---------------------------

# Config

# ---------------------------

DEFAULT_FONT = None  # if None, use PIL default (or set path to .ttf)

FONT_SIZE_RATIO = 0.07  # fraction of image height to use as font size for main caption

STROKE_WIDTH_RATIO = 0.008  # stroke width relative to image height

TOP_BOTTOM_MARGIN_RATIO = 0.03  # vertical margin as fraction of image height

MAX_LINES = 3  # maximum lines of text for top/bottom each


# Some offline caption templates (fallback)

OFFLINE_TEMPLATES = [

    "When you finally fix the bug and the build passes",

    "Me: I'll sleep early tonight\nAlso me at 3 AM:",

    "That feeling when coffee kicks in",

    "POV: You open the fridge and forget what you wanted",

    "When your code works on the first run",

    "Expectation vs Reality",

    "When someone says 'just restart it'",

    "When the meeting could have been an email",

    "I don't always test my code, but when I do, I do it in production",

    "When you say you'll 'quickly' refactor"

]


# ---------------------------

# Utilities: Image loading

# ---------------------------

def load_image_from_path_or_url(path_or_url: str) -> Image.Image:

    """Load image from filesystem path or HTTP(S) URL."""

    if path_or_url.startswith("http://") or path_or_url.startswith("https://"):

        resp = requests.get(path_or_url, timeout=15)

        resp.raise_for_status()

        return Image.open(BytesIO(resp.content)).convert("RGBA")

    else:

        return Image.open(path_or_url).convert("RGBA")


# ---------------------------

# AI caption generation

# ---------------------------

def generate_captions_ai(prompt_context: str, n: int = 6, model: str = "gpt-4") -> List[str]:

    """

    Use OpenAI to generate meme caption suggestions.

    - prompt_context: short description of the image or theme

    - n: number of suggestions

    """

    if not OPENAI_API_KEY or not OPENAI_OK:

        raise RuntimeError("OpenAI SDK not available or OPENAI_API_KEY not set.")

    # Compose a concise system/user prompt to ask for short, meme-style captions.

    system = (

        "You are a creative meme caption generator. "

        "Given a short description of an image or theme, produce short, humorous, internet-style captions. "

        "Return an array of captions without numbering. Keep each caption to 1-2 lines. Avoid offensive content."

    )

    user = (

        f"Image description / theme: {prompt_context}\n\n"

        f"Produce exactly {n} short caption suggestions (1-2 lines each). Use casual, meme-friendly tone."

    )

    # Uses the legacy ChatCompletion interface (openai<1.0); newer SDKs use client.chat.completions.create

    try:

        resp = openai.ChatCompletion.create(

            model=model,

            messages=[

                {"role": "system", "content": system},

                {"role": "user", "content": user}

            ],

            max_tokens=400,

            temperature=0.8,

            n=1,

        )

        content = resp["choices"][0]["message"]["content"].strip()

        # Try to split into lines or bullets

        captions = []

        for line in content.splitlines():

            line = line.strip()

            if not line:

                continue

            # strip bullet numbers

            if line[0].isdigit() and line[1:2] in (".", ")"):

                line = line[2:].strip()

            if line.startswith("-") or line.startswith("•"):

                line = line[1:].strip()

            captions.append(line)

        # If we have fewer than requested, split by ';' or ' / '

        if len(captions) < n:

            parts = [p.strip() for p in content.replace("\n", ";").split(";") if p.strip()]

            captions = parts[:n]

        return captions[:n]

    except Exception as e:

        # fallback: raise to caller

        raise RuntimeError(f"OpenAI request failed: {e}")


# ---------------------------

# Text rendering on image (classic meme style)

# ---------------------------

def select_font(size_px: int) -> ImageFont.FreeTypeFont:

    """Try to load Impact-like font or fallback to default PIL font."""

    # Common Impact font locations (varies by OS); you can also ship a .ttf alongside this script.

    paths = [

        "Impact.ttf",  # local copy

        "/usr/share/fonts/truetype/impact/Impact.ttf",

        "/usr/share/fonts/truetype/msttcorefonts/Impact.ttf",

        "/Library/Fonts/Impact.ttf",

        "/usr/share/fonts/truetype/freefont/FreeSansBold.ttf",

    ]

    if DEFAULT_FONT:

        paths.insert(0, DEFAULT_FONT)

    for p in paths:

        try:

            if os.path.exists(p):

                return ImageFont.truetype(p, size_px)

        except Exception:

            continue

    # Fallback

    return ImageFont.load_default()


def draw_text_with_stroke(draw: ImageDraw.Draw, position, text, font, fill="white", stroke_fill="black", stroke_width=2, align="center"):

    """Draw text with stroke (outline) for good readability."""

    x, y = position

    # Newer Pillow supports stroke parameters directly; otherwise fall back to manual 8-neighbor strokes

    try:

        draw.text((x, y), text, font=font, fill=fill, stroke_width=stroke_width, stroke_fill=stroke_fill, anchor="ms", align=align)

    except TypeError:

        # manual stroke (8 neighbors)

        for dx in range(-stroke_width, stroke_width+1):

            for dy in range(-stroke_width, stroke_width+1):

                if dx == 0 and dy == 0:

                    continue

                draw.text((x+dx, y+dy), text, font=font, fill=stroke_fill, anchor="ms", align=align)

        draw.text((x, y), text, font=font, fill=fill, anchor="ms", align=align)


def wrap_text_for_width(text: str, font: ImageFont.FreeTypeFont, max_width: int) -> List[str]:

    """Wrap text into lines that fit into max_width using the provided font."""

    lines = []


    words = text.split()

    if not words:

        return []

    # greedy algorithm: keep adding words until width exceeded

    cur = words[0]

    for w in words[1:]:

        test = cur + " " + w

        if font.getlength(test) <= max_width:  # Pillow ≥10 removed getsize; getlength is the replacement

            cur = test

        else:

            lines.append(cur)

            cur = w

    lines.append(cur)

    # If there are still too many lines, the caller shrinks the font and re-wraps

    return lines


def render_meme(image: Image.Image, top_text: Optional[str], bottom_text: Optional[str], out_path: str):

    """

    Render top and/or bottom text on the image in meme style and save to out_path.

    - top_text and bottom_text can be multi-line; function will wrap to fit.

    """

    img = image.convert("RGBA")

    w, h = img.size

    draw = ImageDraw.Draw(img)


    # Decide font sizes relative to image height

    font_size = max(14, int(h * FONT_SIZE_RATIO))

    stroke_w = max(1, int(h * STROKE_WIDTH_RATIO))

    font = select_font(font_size)


    # available text width is image width minus margins

    max_text_width = int(w * 0.92)


    def draw_block(text, y_anchor):

        if not text:

            return

        # Wrap into lines

        lines = []

        # naive wrapping tries smaller font if too many lines

        words = text.splitlines() if "\n" in text else [text]

        # flatten multiple lines and re-wrap each

        combined = " ".join(l.strip() for l in words if l.strip())

        # iterative approach: try to wrap and if too many lines, reduce font

        curr_font = font

        for attempt in range(4):

            # greedily wrap to fit

            lines = wrap_text_for_width(combined, curr_font, max_text_width)

            if len(lines) <= MAX_LINES:

                break

            # reduce font size and retry

            fs = max(12, int(getattr(curr_font, "size", font_size) * 0.9))  # default bitmap font lacks .size

            curr_font = select_font(fs)

        # compute total height for block

        line_heights = [curr_font.getbbox(line)[3] for line in lines]  # bbox bottom ≈ line height

        block_h = sum(line_heights) + (len(lines)-1) * 4


        # starting y depending on anchor ("top" or "bottom")

        if y_anchor == "top":

            y = int(h * TOP_BOTTOM_MARGIN_RATIO) + curr_font.getbbox(lines[0])[3]//2

        else:  # bottom

            y = h - int(h * TOP_BOTTOM_MARGIN_RATIO) - block_h + curr_font.getbbox(lines[0])[3]//2


        # draw each line centered

        for line in lines:

            x = w // 2

            draw_text_with_stroke(draw, (x, y), line, font=curr_font, fill="white", stroke_fill="black", stroke_width=stroke_w, align="center")

            y += curr_font.getbbox(line)[3] + 4


    draw_block(top_text, "top")

    draw_block(bottom_text, "bottom")


    # Convert back and save

    final = img.convert("RGB")

    final.save(out_path, quality=95)

    return out_path
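
# Non-interactive usage sketch ("cat.jpg" is a placeholder path):

#   img = load_image_from_path_or_url("cat.jpg")

#   render_meme(img, "WHEN THE BUILD PASSES", "ON THE FIRST TRY", "meme_output.jpg")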


# ---------------------------

# Small CLI / Interactive prompt

# ---------------------------

def choose_from_list(prompt: str, options: List[str]) -> int:

    """Let user pick an index from options on the console."""

    print(prompt)

    for i, o in enumerate(options, start=1):

        print(f"{i}. {o}")

    while True:

        try:

            sel = input("Choose number (or 'c' to cancel): ").strip()

            if sel.lower() == "c":

                return -1

            idx = int(sel)-1

            if 0 <= idx < len(options):

                return idx

        except Exception:

            pass

        print("Invalid choice. Try again.")


def main_interactive():

    print("=== AI-Powered Meme Generator ===")

    print("Enter local image path, or an image URL (http/https).")

    path = input("Image path or URL: ").strip()

    if not path:

        print("No image provided — exiting.")

        return


    try:

        img = load_image_from_path_or_url(path)

    except Exception as e:

        print("Failed to load image:", e)

        return


    use_ai = False

    if OPENAI_API_KEY and OPENAI_OK:

        ans = input("Generate AI captions? (y/N): ").strip().lower()

        use_ai = ans == "y"

    else:

        print("OpenAI not configured — using offline caption templates.")


    captions = []

    if use_ai:

        desc = input("Describe the image/theme briefly (or press Enter to auto-describe): ").strip()

        if not desc:

            desc = "A funny photo suitable for memes (describe notable objects / people / mood)."

        try:

            captions = generate_captions_ai(desc, n=8)

        except Exception as e:

            print("AI caption generation failed:", e)

            print("Falling back to offline templates.")

            captions = OFFLINE_TEMPLATES.copy()

    else:

        captions = OFFLINE_TEMPLATES.copy()


    # Show captions and pick or let user type

    idx = choose_from_list("Choose a caption (option 1 lets you type your own):", ["[Type custom caption]"] + captions)

    if idx == -1:

        print("Cancelled.")

        return

    if idx == 0:

        custom = input("Enter custom caption (use '\\n' for line break): ")

        chosen = custom.replace("\\n", "\n")  # turn the typed '\n' into a real line break

    else:

        chosen = captions[idx-1]


    # Choose top/bottom or both

    pos_choice = input("Place caption at (1) Top, (2) Bottom, (3) Both? [2]: ").strip() or "2"

    if pos_choice not in ("1","2","3"):

        pos_choice = "2"

    if pos_choice == "1":

        top_text = chosen

        bottom_text = None

    elif pos_choice == "2":

        top_text = None

        bottom_text = chosen

    else:

        # Ask for a second caption or reuse

        second_idx = choose_from_list("Pick another caption for the other position or type custom:", ["[Type custom]"] + captions)

        if second_idx == -1:

            print("Cancelled.")

            return

        if second_idx == 0:

            second = input("Second caption: ")

        else:

            second = captions[second_idx-1]

        top_text = chosen

        bottom_text = second


    # Save file

    out_name = input("Output filename (default: meme_output.jpg): ").strip() or "meme_output.jpg"

    try:

        out_path = render_meme(img, top_text, bottom_text, out_name)

        print("Saved meme to", out_path)

    except Exception as e:

        print("Failed to render meme:", e)


if __name__ == "__main__":

    main_interactive()


Virtual Plant Care Simulator

import pygame

import sys

import json

import math

import time

from datetime import datetime


# -------------------------

# Config

# -------------------------

SAVE_FILE = "plant_save.json"

WIDTH, HEIGHT = 720, 560

FPS = 60


# In-game time scale: how many in-game minutes pass per real second

TIME_SCALE_MIN_PER_SEC = 5.0            # 1 real sec = 5 in-game minutes

TURBO_MULTIPLIER = 10.0                 # Turbo mode speed-up
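
# Worked example: with Turbo on, 5 * 10 = 50 in-game minutes pass per real

# second, so one real minute fast-forwards 50 * 60 = 3000 in-game minutes

# (about 50 hours).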


# Decay & growth tuning (per in-game minute)

MOISTURE_DECAY = 0.10                   # moisture decreases per minute

NUTRIENT_DECAY = 0.02                   # nutrients decreases per minute

HEALTH_INC_RATE = 0.02                  # health regen per minute (good care)

HEALTH_DEC_RATE = 0.05                  # health loss per minute (bad care)

HEALTH_SEVERE_PENALTY = 0.20            # extra loss if moisture is 0


WATER_AMOUNT = 35                       # moisture gained per watering

FERTILIZER_AMOUNT = 25                  # nutrients gained per fertilize


GOOD_MOISTURE = 50

GOOD_NUTRIENTS = 50

LOW_MOISTURE = 30

LOW_NUTRIENTS = 30


# Growth stages by age (in-game minutes)

STAGES = [

    ("Seed", 0),

    ("Sprout", 12 * 60),         # 12 hours

    ("Young", 36 * 60),          # 1.5 days

    ("Mature", 3 * 24 * 60),     # 3 days

    ("Blooming", 7 * 24 * 60),   # 1 week

]


# Colors

WHITE = (255, 255, 255)

BG = (238, 245, 239)

TEXT = (34, 40, 49)

CARD = (245, 250, 245)

GREEN = (72, 157, 77)

YELLOW = (240, 190, 40)

RED = (220, 76, 70)

BLUE = (66, 135, 245)

BROWN = (99, 68, 48)

SOIL = (84, 59, 42)

GREY = (200, 205, 210)


pygame.init()

pygame.display.set_caption("Virtual Plant Care Simulator 🌱")

screen = pygame.display.set_mode((WIDTH, HEIGHT))

clock = pygame.time.Clock()

FONT = pygame.font.SysFont("consolas", 18)

FONT_BIG = pygame.font.SysFont("consolas", 24, bold=True)


# -------------------------

# Helpers

# -------------------------

def clamp(v, lo=0.0, hi=100.0):

    return max(lo, min(hi, v))


def now_iso():

    return datetime.now().isoformat()


def load_state():

    try:

        with open(SAVE_FILE, "r", encoding="utf-8") as f:

            data = json.load(f)

        # basic sanity defaults

        data.setdefault("name", "Leafy")

        data.setdefault("age_minutes", 0.0)

        data.setdefault("moisture", 70.0)

        data.setdefault("nutrients", 60.0)

        data.setdefault("health", 80.0)

        data.setdefault("alive", True)

        data.setdefault("last_update_iso", now_iso())

        data.setdefault("turbo", False)

        return data

    except FileNotFoundError:

        return {

            "name": "Leafy",

            "age_minutes": 0.0,

            "moisture": 70.0,

            "nutrients": 60.0,

            "health": 80.0,

            "alive": True,

            "last_update_iso": now_iso(),

            "turbo": False

        }


def save_state(state):

    with open(SAVE_FILE, "w", encoding="utf-8") as f:

        json.dump(state, f, indent=2)


def stage_from_age(age_min):

    current = STAGES[0][0]

    for name, threshold in STAGES:

        if age_min >= threshold:

            current = name

        else:

            break

    return current


def draw_bar(surface, x, y, w, h, pct, color, label):

    pygame.draw.rect(surface, GREY, (x, y, w, h), border_radius=6)

    inner_w = int(w * clamp(pct/100.0, 0, 1))

    pygame.draw.rect(surface, color, (x, y, inner_w, h), border_radius=6)

    txt = FONT.render(f"{label}: {int(pct)}%", True, TEXT)

    surface.blit(txt, (x + 8, y + h//2 - txt.get_height()//2))


def draw_button(surface, rect, text, active=True):

    col = CARD if active else (230,230,230)

    pygame.draw.rect(surface, col, rect, border_radius=8)

    pygame.draw.rect(surface, GREY, rect, width=1, border_radius=8)

    t = FONT.render(text, True, TEXT if active else (140,140,140))

    surface.blit(t, (rect.x + rect.w//2 - t.get_width()//2,

                     rect.y + rect.h//2 - t.get_height()//2))


def nice_time(minutes):

    d = int(minutes // (24*60))

    h = int((minutes % (24*60)) // 60)

    m = int(minutes % 60)

    parts = []

    if d: parts.append(f"{d}d")

    if h: parts.append(f"{h}h")

    parts.append(f"{m}m")

    return " ".join(parts)


# -------------------------

# Plant rendering

# -------------------------

def draw_pot(surface, cx, base_y):

    pot_w = 160

    pot_h = 70

    rim_h = 18

    # pot body

    pygame.draw.rect(surface, BROWN, (cx - pot_w//2, base_y - pot_h, pot_w, pot_h), border_radius=10)

    # soil

    pygame.draw.rect(surface, SOIL, (cx - pot_w//2 + 10, base_y - pot_h + rim_h, pot_w - 20, pot_h - rim_h - 6), border_radius=8)

    # rim

    pygame.draw.rect(surface, (120, 84, 60), (cx - pot_w//2, base_y - pot_h - 6, pot_w, rim_h), border_radius=8)


def draw_plant(surface, cx, base_y, age_min, health, moisture, nutrients, t):

    """

    Procedural plant:

    - Height scales with age & health

    - Stem sways using sin wave

    - Leaves hue via moisture/nutrients

    """

    # growth factor

    stage = stage_from_age(age_min)

    stage_index = [s[0] for s in STAGES].index(stage)

    height = 40 + stage_index * 35 + (health/100.0) * 30  # 40..~220


    # stem

    points = []

    segments = 8

    for i in range(segments+1):

        y = base_y - (height * i / segments)

        x = cx + math.sin(t*2 + i*0.6) * (8 * (i/segments))

        points.append((x, y))

    # draw stem as polyline (thicker at base)

    for i in range(len(points)-1):

        w = int(8 - 6*(i/segments))

        pygame.draw.line(surface, (46, 120, 52), points[i], points[i+1], max(2, w))


    # leaf color based on care

    care_score = (moisture/100.0 + nutrients/100.0 + health/100.0) / 3.0

    leaf_col = (

        int(60 + 120 * care_score),    # R

        int(120 + 120 * care_score),   # G

        int(60 + 80 * care_score)      # B

    )


    # leaves along stem

    for i in range(2, segments):

        px, py = points[i]

        lr = 18 + 6*(i/segments)

        angle = math.sin(t*3 + i) * 0.5 + (1 if i%2==0 else -1)*0.6

        # left leaf

        leaf_poly(surface, px, py, lr, angle, leaf_col)

        # right leaf

        leaf_poly(surface, px, py, lr, -angle, leaf_col)


    # bud/flower at top for late stages

    if stage in ("Mature", "Blooming"):

        r = 10 if stage == "Mature" else 14 + 4*math.sin(t*4)

        pygame.draw.circle(surface, (240, 110, 130), (int(points[-1][0]), int(points[-1][1])-8), int(abs(r)))


def leaf_poly(surface, x, y, r, angle, col):

    # Draw a simple rotated leaf (ellipse-ish polygon)

    pts = []

    steps = 10

    for i in range(steps+1):

        theta = -math.pi/2 + math.pi * (i/steps)

        px = x + r * math.cos(theta) * 0.4

        py = y + r * math.sin(theta)

        # rotate around (x,y)

        rx = x + (px - x) * math.cos(angle) - (py - y) * math.sin(angle)

        ry = y + (px - x) * math.sin(angle) + (py - y) * math.cos(angle)

        pts.append((rx, ry))

    pygame.draw.polygon(surface, col, pts)


# -------------------------

# Update logic

# -------------------------

def update_state(state, dt_minutes):

    if not state["alive"]:

        return


    # age grows faster when healthy

    health_factor = 0.5 + (state["health"]/100.0) * 0.5  # 0.5..1.0

    state["age_minutes"] += dt_minutes * health_factor


    # resource decay

    state["moisture"] = clamp(state["moisture"] - MOISTURE_DECAY * dt_minutes)

    state["nutrients"] = clamp(state["nutrients"] - NUTRIENT_DECAY * dt_minutes)


    # health dynamics

    good_care = (state["moisture"] >= GOOD_MOISTURE) and (state["nutrients"] >= GOOD_NUTRIENTS)

    low_care = (state["moisture"] < LOW_MOISTURE) or (state["nutrients"] < LOW_NUTRIENTS)


    if good_care:

        state["health"] = clamp(state["health"] + HEALTH_INC_RATE * dt_minutes)

    if low_care:

        penalty = HEALTH_DEC_RATE * dt_minutes

        # severe dehydration hurts more

        if state["moisture"] <= 0.1:

            penalty += HEALTH_SEVERE_PENALTY * dt_minutes

        state["health"] = clamp(state["health"] - penalty)


    if state["health"] <= 0.0:

        state["alive"] = False
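
# Headless sanity check (a sketch): with the decay rates above, an unattended

# plant runs dry in under 12 in-game hours and dies well within a day:

#   demo = {"alive": True, "age_minutes": 0.0, "moisture": 70.0,

#           "nutrients": 60.0, "health": 80.0}

#   for _ in range(24 * 60):          # one in-game day in one-minute steps

#       update_state(demo, 1.0)

#   print(demo["alive"], round(demo["health"], 1))   # False 0.0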


def water(state):

    if not state["alive"]: return

    state["moisture"] = clamp(state["moisture"] + WATER_AMOUNT)


def fertilize(state):

    if not state["alive"]: return

    state["nutrients"] = clamp(state["nutrients"] + FERTILIZER_AMOUNT)


def revive(state):

    # Soft revive to keep testing

    state["alive"] = True

    state["health"] = max(state["health"], 40.0)

    state["moisture"] = max(state["moisture"], 40.0)


# -------------------------

# UI setup

# -------------------------

BUTTONS = {

    "water": pygame.Rect(40, 460, 140, 36),

    "fertilize": pygame.Rect(200, 460, 140, 36),

    "save": pygame.Rect(360, 460, 120, 36),

    "turbo": pygame.Rect(500, 460, 160, 36),

    "revive": pygame.Rect(40, 510, 120, 34),

    "new": pygame.Rect(180, 510, 120, 34),

    "quit": pygame.Rect(320, 510, 120, 34),

}


def reset_new_plant():

    return {

        "name": "Leafy",

        "age_minutes": 0.0,

        "moisture": 70.0,

        "nutrients": 60.0,

        "health": 80.0,

        "alive": True,

        "last_update_iso": now_iso(),

        "turbo": False

    }


# -------------------------

# Main loop

# -------------------------

def main():

    state = load_state()


    last_real = time.time()

    running = True


    while running:

        real_now = time.time()

        real_dt = real_now - last_real

        last_real = real_now


        # Time scale

        speed = TIME_SCALE_MIN_PER_SEC * (TURBO_MULTIPLIER if state.get("turbo") else 1.0)

        dt_minutes = real_dt * speed


        # Update plant based on dt

        update_state(state, dt_minutes)


        # Draw

        screen.fill(BG)


        # Left info card

        pygame.draw.rect(screen, CARD, (24, 24, 280, 410), border_radius=12)

        pygame.draw.rect(screen, GREY, (24, 24, 280, 410), width=1, border_radius=12)


        title = FONT_BIG.render("Plant Status", True, TEXT)

        screen.blit(title, (40, 36))


        # Bars

        draw_bar(screen, 40, 80, 240, 20, state["health"], GREEN if state["health"] >= 50 else YELLOW if state["health"] >= 25 else RED, "Health")

        draw_bar(screen, 40, 120, 240, 20, state["moisture"], BLUE if state["moisture"] >= 40 else YELLOW if state["moisture"] >= 20 else RED, "Moisture")

        draw_bar(screen, 40, 160, 240, 20, state["nutrients"], GREEN if state["nutrients"] >= 40 else YELLOW if state["nutrients"] >= 20 else RED, "Nutrients")


        # Text stats

        lines = [

            f"Name: {state['name']}",

            f"Stage: {stage_from_age(state['age_minutes'])}",

            f"Age: {nice_time(state['age_minutes'])}",

            f"Alive: {'Yes' if state['alive'] else 'No'}",

            f"Speed: {'Turbo' if state.get('turbo') else 'Normal'} (x{int(TURBO_MULTIPLIER) if state.get('turbo') else 1})",

            "",

            "Tips:",

            "- Keep moisture & nutrients ≥ 50.",

            "- Turbo to fast-forward growth.",

            "- Save to keep progress.",

            "- Revive for testing if it dies."

        ]

        y = 200

        for ln in lines:

            txt = FONT.render(ln, True, TEXT)

            screen.blit(txt, (40, y))

            y += 22


        # Plant area (right)

        pygame.draw.rect(screen, CARD, (324, 24, 372, 410), border_radius=12)

        pygame.draw.rect(screen, GREY, (324, 24, 372, 410), width=1, border_radius=12)

        arena = pygame.Rect(324, 24, 372, 410)

        cx = arena.x + arena.w//2

        base_y = arena.y + arena.h - 40


        # Ground line

        pygame.draw.line(screen, (180, 180, 180), (arena.x + 20, base_y), (arena.right - 20, base_y), 2)


        # Draw pot & plant

        draw_pot(screen, cx, base_y)

        t = pygame.time.get_ticks() / 1000.0

        draw_plant(screen, cx, base_y, state["age_minutes"], state["health"], state["moisture"], state["nutrients"], t)


        # Buttons

        draw_button(screen, BUTTONS["water"], "💧 Water", active=state["alive"])

        draw_button(screen, BUTTONS["fertilize"], "🧪 Fertilize", active=state["alive"])

        draw_button(screen, BUTTONS["save"], "💾 Save")

        draw_button(screen, BUTTONS["turbo"], f"⚡ Turbo: {'ON' if state.get('turbo') else 'OFF'}")

        draw_button(screen, BUTTONS["revive"], "❤️ Revive")

        draw_button(screen, BUTTONS["new"], "🌱 New Plant")

        draw_button(screen, BUTTONS["quit"], "🚪 Quit")


        # Warning labels

        warn_y = 430

        if state["moisture"] < LOW_MOISTURE:

            wtxt = FONT.render("Low Moisture! Water me 💧", True, BLUE)

            screen.blit(wtxt, (40, warn_y)); warn_y += 22

        if state["nutrients"] < LOW_NUTRIENTS:

            wtxt = FONT.render("Low Nutrients! Fertilize 🧪", True, (150, 100, 20))

            screen.blit(wtxt, (40, warn_y)); warn_y += 22

        if not state["alive"]:

            wtxt = FONT_BIG.render("The plant has died. Try Revive or New Plant.", True, RED)

            screen.blit(wtxt, (40, warn_y))


        pygame.display.flip()


        # Events

        for event in pygame.event.get():

            if event.type == pygame.QUIT:

                save_state(state)

                running = False

            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:

                mx, my = event.pos

                if BUTTONS["water"].collidepoint(mx, my) and state["alive"]:

                    water(state)

                elif BUTTONS["fertilize"].collidepoint(mx, my) and state["alive"]:

                    fertilize(state)

                elif BUTTONS["save"].collidepoint(mx, my):

                    save_state(state)

                elif BUTTONS["turbo"].collidepoint(mx, my):

                    state["turbo"] = not state.get("turbo", False)

                elif BUTTONS["revive"].collidepoint(mx, my):

                    revive(state)

                elif BUTTONS["new"].collidepoint(mx, my):

                    state = reset_new_plant()

                elif BUTTONS["quit"].collidepoint(mx, my):

                    save_state(state)

                    running = False

            elif event.type == pygame.KEYDOWN:

                if event.key == pygame.K_w and state["alive"]:

                    water(state)

                elif event.key == pygame.K_f and state["alive"]:

                    fertilize(state)

                elif event.key == pygame.K_s:

                    save_state(state)

                elif event.key == pygame.K_t:

                    state["turbo"] = not state.get("turbo", False)

                elif event.key == pygame.K_r:

                    revive(state)

                elif event.key == pygame.K_n:

                    state = reset_new_plant()

                elif event.key == pygame.K_ESCAPE:

                    save_state(state)

                    running = False


        clock.tick(FPS)


    pygame.quit()

    sys.exit()


if __name__ == "__main__":

    main()


Handwritten Math Solver

Install requirements

pip install tensorflow Pillow opencv-python numpy sympy

Train a digit model once (MNIST) — train_mnist_cnn.py

This trains a small CNN on MNIST and saves mnist_cnn.h5.

# train_mnist_cnn.py

import tensorflow as tf

from tensorflow import keras

from tensorflow.keras import layers


def build_model():

    model = keras.Sequential([

        layers.Input(shape=(28, 28, 1)),

        layers.Conv2D(32, 3, activation='relu'),

        layers.Conv2D(32, 3, activation='relu'),

        layers.MaxPooling2D(),

        layers.Dropout(0.25),


        layers.Conv2D(64, 3, activation='relu'),

        layers.Conv2D(64, 3, activation='relu'),

        layers.MaxPooling2D(),

        layers.Dropout(0.25),


        layers.Flatten(),

        layers.Dense(128, activation='relu'),

        layers.Dropout(0.5),

        layers.Dense(10, activation='softmax')

    ])

    model.compile(optimizer='adam',

                  loss='sparse_categorical_crossentropy',

                  metrics=['accuracy'])

    return model


def main():

    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    x_train = x_train.astype("float32") / 255.0

    x_test = x_test.astype("float32") / 255.0

    x_train = x_train[..., None]

    x_test = x_test[..., None]


    model = build_model()

    model.fit(x_train, y_train, batch_size=128, epochs=5, validation_split=0.1)

    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)

    print(f"Test accuracy: {test_acc:.4f}")

    model.save("mnist_cnn.h5")

    print("Saved model to mnist_cnn.h5")


if __name__ == "__main__":

    main()
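
A quick way to sanity-check the saved model (a sketch; assumes mnist_cnn.h5 is in the working directory):

from tensorflow import keras

import numpy as np

model = keras.models.load_model("mnist_cnn.h5")

(_, _), (x_test, y_test) = keras.datasets.mnist.load_data()

sample = (x_test[0].astype("float32") / 255.0)[None, ..., None]  # shape (1, 28, 28, 1)

print("predicted:", int(np.argmax(model.predict(sample, verbose=0))), "actual:", int(y_test[0]))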

The GUI solver — handwritten_math_solver.py

# handwritten_math_solver.py
import tkinter as tk
from tkinter import messagebox
from PIL import Image, ImageDraw
import numpy as np
import cv2
from sympy import sympify, simplify
from tensorflow.keras.models import load_model
# ---- Config ----
MODEL_PATH = "mnist_cnn.h5"
CANVAS_SIZE = 400           # drawing canvas (square)
DRAW_WIDTH = 14             # brush thickness (thicker = easier OCR)
MIN_CONTOUR_AREA = 60       # filter noise
PADDING = 8                 # pad per glyph before resize to 28x28
# Heuristics thresholds for operators
MINUS_AR_THRESH = 2.0       # width/height > this → likely '-'
MINUS_HEIGHT_FRAC = 0.45    # symbol height relative to median digit height (shorter → minus)
PLUS_PEAK_FRAC = 0.6        # vertical and horizontal central peaks to consider '+'
class MathSolverApp:
    def __init__(self, root):
        self.root = root
        self.root.title("Handwritten Math Solver (digits + +/−)")
        # Canvas to draw
        self.canvas = tk.Canvas(root, width=CANVAS_SIZE, height=CANVAS_SIZE, bg="white", cursor="cross")
        self.canvas.grid(row=0, column=0, columnspan=3, padx=10, pady=10)
        # PIL image to accumulate strokes (black on white)
        self.image = Image.new("L", (CANVAS_SIZE, CANVAS_SIZE), color=255)
        self.draw = ImageDraw.Draw(self.image)
        # Bind drawing
        self.last_x, self.last_y = None, None
        self.canvas.bind("<ButtonPress-1>", self.pen_down)
        self.canvas.bind("<B1-Motion>", self.paint)
        self.canvas.bind("<ButtonRelease-1>", self.pen_up)
        # Buttons
        tk.Button(root, text="Recognize & Solve", command=self.recognize_and_solve).grid(row=1, column=0, pady=6)
        tk.Button(root, text="Clear", command=self.clear_canvas).grid(row=1, column=1, pady=6)
        tk.Button(root, text="Quit", command=root.quit).grid(row=1, column=2, pady=6)
        # Output
        self.expr_var = tk.StringVar(value="Expression: ")
        self.result_var = tk.StringVar(value="Result: ")
        self.step_text = tk.Text(root, width=60, height=10, wrap="word")
        tk.Label(root, textvariable=self.expr_var, anchor="w").grid(row=2, column=0, columnspan=3, sticky="w", padx=10)
        tk.Label(root, textvariable=self.result_var, anchor="w").grid(row=3, column=0, columnspan=3, sticky="w", padx=10)
        tk.Label(root, text="Steps:").grid(row=4, column=0, sticky="w", padx=10)
        self.step_text.grid(row=5, column=0, columnspan=3, padx=10, pady=4)
        # Load model
        try:
            self.model = load_model(MODEL_PATH)
        except Exception as e:
            messagebox.showerror("Model Error",
                                 f"Could not load {MODEL_PATH}.\nTrain it first with train_mnist_cnn.py.\n\n{e}")
            self.model = None
    # ---------- Drawing handlers ----------
    def pen_down(self, event):
        self.last_x, self.last_y = event.x, event.y
    def paint(self, event):
        if self.last_x is not None and self.last_y is not None:
            # Draw on Tk canvas
            self.canvas.create_line(self.last_x, self.last_y, event.x, event.y,
                                    width=DRAW_WIDTH, fill="black", capstyle=tk.ROUND, smooth=True)
            # Draw on PIL image
            self.draw.line([self.last_x, self.last_y, event.x, event.y],
                           fill=0, width=DRAW_WIDTH)
        self.last_x, self.last_y = event.x, event.y
    def pen_up(self, event):
        self.last_x, self.last_y = None, None
    def clear_canvas(self):
        self.canvas.delete("all")
        self.image = Image.new("L", (CANVAS_SIZE, CANVAS_SIZE), color=255)
        self.draw = ImageDraw.Draw(self.image)
        self.expr_var.set("Expression: ")
        self.result_var.set("Result: ")
        self.step_text.delete("1.0", tk.END)
    # ---------- Core pipeline ----------
    def recognize_and_solve(self):
        if self.model is None:
            messagebox.showwarning("Model", "Model not loaded.")
            return
        # Convert PIL to OpenCV
        img = np.array(self.image)
        expr, tokens_dbg = self.image_to_expression(img)
        if not expr:
            messagebox.showwarning("Parse", "Could not parse any symbols. Try writing bigger/cleaner.")
            return
        self.expr_var.set(f"Expression: {expr}")
        try:
            # Use sympy to evaluate
            sym_expr = sympify(expr)
            simplified = simplify(sym_expr)
            self.result_var.set(f"Result: {simplified}")
            # Show steps (simple for now)
            self.step_text.delete("1.0", tk.END)
            self.step_text.insert(tk.END, "Tokens (left→right):\n")
            self.step_text.insert(tk.END, " ".join(tokens_dbg) + "\n\n")
            self.step_text.insert(tk.END, f"SymPy parsed: {sym_expr}\n")
            if str(sym_expr) != str(simplified):
                self.step_text.insert(tk.END, f"Simplified: {simplified}\n")
            else:
                self.step_text.insert(tk.END, "No further simplification needed.\n")
        except Exception as e:
            messagebox.showerror("Evaluation Error", f"Failed to evaluate expression:\n{e}")
    def image_to_expression(self, gray_img: np.ndarray) -> tuple[str, list]:
        """
        Segment symbols, classify digits with CNN, infer + / - with projection heuristics.
        Returns (expression_string, debug_tokens)
        """
        # 1) Binarize & clean
        # Invert: handwriting is black (0), background white (255) => for OpenCV we want white-on-black for morphology ops.
        inv = 255 - gray_img
        # Threshold
        _, th = cv2.threshold(inv, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # Morph open small noise
        kernel = np.ones((3,3), np.uint8)
        th = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel, iterations=1)
        # 2) Find contours (symbols)
        contours, _ = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        boxes = []
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            area = w * h
            if area < MIN_CONTOUR_AREA:
                continue
            boxes.append((x, y, w, h))
        if not boxes:
            return "", []
        # Sort left-to-right
        boxes.sort(key=lambda b: b[0])
        # Median height (helps operator heuristics)
        med_h = np.median([h for (_, _, _, h) in boxes])
        tokens = []
        debug_tokens = []
        for (x, y, w, h) in boxes:
            crop = th[y:y+h, x:x+w]  # white ink on black background
            # Operator heuristic first (minus / plus)
            op = self.classify_operator(crop, w, h, med_h)
            if op is not None:
                tokens.append(op)
                debug_tokens.append(f"[{op}]")
                continue
            # Otherwise, digit classification
            digit = self.classify_digit(crop)
            if digit is None:
                # If not digit and not recognized operator, skip (or treat as minus attempt)
                # Safer to skip
                continue
            tokens.append(str(digit))
            debug_tokens.append(str(digit))
        # Merge digits & operators into expression string
        expr = self.tokens_to_expression(tokens)
        return expr, debug_tokens
    def classify_digit(self, crop_bin: np.ndarray) -> int | None:
        """
        Prepare glyph for MNIST CNN (28x28, centered), and predict 0-9.
        crop_bin: white ink on black background (binary)
        """
        # Make sure it's binary (0/255)
        crop = (crop_bin > 0).astype(np.uint8) * 255
        # Add padding
        crop = cv2.copyMakeBorder(crop, PADDING, PADDING, PADDING, PADDING, cv2.BORDER_CONSTANT, value=0)
        # Find tight box again after pad
        ys, xs = np.where(crop > 0)
        if len(xs) == 0 or len(ys) == 0:
            return None
        x0, x1 = xs.min(), xs.max()
        y0, y1 = ys.min(), ys.max()
        crop = crop[y0:y1+1, x0:x1+1]
        # Resize to 20x20 then center in 28x28 (like MNIST preprocessing)
        h, w = crop.shape
        if h > w:
            new_h = 20
            new_w = int(w * (20.0 / h))
        else:
            new_w = 20
            new_h = int(h * (20.0 / w))
        if new_h <= 0: new_h = 1
        if new_w <= 0: new_w = 1
        resized = cv2.resize(crop, (new_w, new_h), interpolation=cv2.INTER_AREA)
        canvas = np.zeros((28, 28), dtype=np.uint8)
        y_off = (28 - new_h) // 2
        x_off = (28 - new_w) // 2
        canvas[y_off:y_off+new_h, x_off:x_off+new_w] = resized
        # Normalize for model: MNIST is black background (0) with white strokes (1)
        img = canvas.astype("float32") / 255.0
        img = img[..., None]  # (28,28,1)
        pred = self.model.predict(img[None, ...], verbose=0)[0]
        cls = int(np.argmax(pred))
        conf = float(np.max(pred))
        # Optional confidence filtering
        if conf < 0.40:
            return None
        return cls
    def classify_operator(self, crop_bin: np.ndarray, w: int, h: int, med_h: float) -> str | None:
        """
        Very lightweight heuristics:
        - '-' : wide, short, one thick horizontal stroke (width/height large, height << median digit height)
        - '+' : strong central vertical and horizontal projections (peaks)
        """
        # Work on binary with 1s where stroke is present
        b = (crop_bin > 0).astype(np.uint8)
        # Aspect ratio heuristic for '-'
        if h > 0:
            ar = w / float(h)
        else:
            ar = 0
        # height relative to median digit height
        h_frac = h / float(med_h) if med_h > 0 else 1.0
        # Horizontal projection profile (sum along columns) and vertical profile (sum along rows)
        vproj = b.sum(axis=0)  # per column
        hproj = b.sum(axis=1)  # per row
        v_center_peak = vproj[len(vproj)//2] / (b.shape[0] + 1e-6)
        h_center_peak = hproj[len(hproj)//2] / (b.shape[1] + 1e-6)
        # Minus: flat, wide, short
        if ar >= MINUS_AR_THRESH and h_frac <= MINUS_HEIGHT_FRAC:
            return "-"
        # Plus: vertical & horizontal strong central strokes
        if v_center_peak >= PLUS_PEAK_FRAC and h_center_peak >= PLUS_PEAK_FRAC:
            return "+"
        return None
    def tokens_to_expression(self, tokens: list[str]) -> str:
        """
        Combine tokens into a valid expression.
        - Collapse consecutive digits into multi-digit numbers.
        - Keep '+' and '-' as operators.
        - Remove illegal leading/trailing operators.
        """
        # Collapse digits
        out = []
        num_buf = []
        for t in tokens:
            if t.isdigit():
                num_buf.append(t)
            else:
                # flush number
                if num_buf:
                    out.append("".join(num_buf))
                    num_buf = []
                # operator allowed only if last is number
                if len(out) > 0 and out[-1][-1].isdigit() and t in {"+", "-"}:
                    out.append(t)
        # flush at end
        if num_buf:
            out.append("".join(num_buf))
        # Join safely
        expr = ""
        for item in out:
            if item in {"+", "-"}:
                expr += f" {item} "
            else:
                expr += item
        return expr.strip()
if __name__ == "__main__":
    root = tk.Tk()
    app = MathSolverApp(root)
    root.mainloop()