# Smart Log File Analyzer

import re

import pandas as pd

import numpy as np

from sklearn.ensemble import IsolationForest

import matplotlib.pyplot as plt


# -------------------------------------

# Log Pattern (Apache Common Log)

# -------------------------------------

# Matches the Apache Common Log Format, e.g.:
#   127.0.0.1 - - [10/Oct/2000:13:55:36 -0700] "GET /index.html HTTP/1.0" 200 2326
# Named groups: ip, time, method, url, status, size.
# NOTE(review): `size` only matches digits, so entries logged with "-"
# (no response body) are silently skipped by parse_log — confirm intended.
log_pattern = re.compile(

    r'(?P<ip>\S+) - - \[(?P<time>.*?)\] '

    r'"(?P<method>\S+) (?P<url>\S+) \S+" '

    r'(?P<status>\d+) (?P<size>\d+)'

)


# -------------------------------------

# Parse Log File

# -------------------------------------

def parse_log(file_path):
    """Parse an Apache common-log file into a DataFrame.

    Args:
        file_path: Path to the log file.

    Returns:
        DataFrame with columns ip, time, method, url, status, size,
        where `status` and `size` are ints.  An empty DataFrame with
        those columns is returned when no line matches the pattern
        (previously this case raised KeyError on the astype calls).
    """
    data = []

    # errors="ignore" skips undecodable bytes instead of aborting the parse.
    with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
        for line in f:
            match = log_pattern.search(line)
            if match:
                data.append(match.groupdict())

    # Guard the empty case: pd.DataFrame([]) has no columns at all, so
    # df["status"] below would raise KeyError.
    if not data:
        return pd.DataFrame(
            columns=["ip", "time", "method", "url", "status", "size"]
        )

    df = pd.DataFrame(data)
    df["status"] = df["status"].astype(int)
    df["size"] = df["size"].astype(int)

    return df



# -------------------------------------

# Feature Engineering

# -------------------------------------

def extract_features(df):
    """Aggregate per-IP request features from parsed log entries.

    Args:
        df: DataFrame with at least `ip` and integer `status` columns.

    Returns:
        DataFrame with one row per IP: request_count, errors (4xx count),
        server_errors (5xx count), all integer-typed.
    """
    ip_counts = df.groupby("ip").size().reset_index(name="request_count")
    status_4xx = df[df["status"].between(400, 499)].groupby("ip").size().reset_index(name="errors")
    status_5xx = df[df["status"].between(500, 599)].groupby("ip").size().reset_index(name="server_errors")

    features = ip_counts.merge(status_4xx, on="ip", how="left")
    features = features.merge(status_5xx, on="ip", how="left")

    # Left merges leave NaN (and therefore float dtype) for IPs that
    # produced no errors; fill with 0 and restore integer counts so the
    # report and model input stay consistently typed.
    features[["errors", "server_errors"]] = (
        features[["errors", "server_errors"]].fillna(0).astype(int)
    )

    return features



# -------------------------------------

# Anomaly Detection

# -------------------------------------

def detect_anomalies(features):
    """Flag statistically unusual IPs with an Isolation Forest.

    Adds two columns to `features` in place: `anomaly_score` (the model's
    -1/1 prediction) and `is_suspicious` (True when the score is -1).
    Returns the same DataFrame for chaining.
    """
    feature_cols = ["request_count", "errors", "server_errors"]
    forest = IsolationForest(contamination=0.05, random_state=42)

    features["anomaly_score"] = forest.fit_predict(features[feature_cols])
    features["is_suspicious"] = features["anomaly_score"] == -1

    return features



# -------------------------------------

# Visualization

# -------------------------------------

def visualize(features):
    """Scatter-plot request volume vs. 4xx errors, split by suspicion flag."""
    flagged_mask = features["is_suspicious"]
    ok = features[~flagged_mask]
    flagged = features[flagged_mask]

    plt.figure(figsize=(10, 5))
    plt.scatter(ok["request_count"], ok["errors"], label="Normal")
    plt.scatter(flagged["request_count"], flagged["errors"], label="Suspicious", marker="x")

    plt.xlabel("Request Count")
    plt.ylabel("4xx Errors")
    plt.legend()
    plt.title("Log Anomaly Detection")
    plt.show()



# -------------------------------------

# MAIN

# -------------------------------------

if __name__ == "__main__":
    # Interactive entry point: parse, score, report, then plot.
    path = input("Enter log file path: ").strip()

    print("Parsing logs...")
    log_df = parse_log(path)
    print(f"Loaded {len(log_df)} log entries.")

    features = extract_features(log_df)

    print("Detecting anomalies...")
    analyzed = detect_anomalies(features)

    flagged = analyzed[analyzed["is_suspicious"]]
    print("\n Suspicious IP Addresses:\n")
    print(flagged[["ip", "request_count", "errors", "server_errors"]])

    visualize(analyzed)

    analyzed.to_csv("log_analysis_report.csv", index=False)
    print("\n Report saved as log_analysis_report.csv")


# Image Perspective Corrector

import cv2

import numpy as np


# ----------------------------------

# Order points correctly

# ----------------------------------

def order_points(pts):
    """Return the 4 corner points ordered TL, TR, BR, BL as float32.

    The corner with the smallest x+y sum is top-left and the largest is
    bottom-right; the smallest y-x difference is top-right and the
    largest is bottom-left.
    """
    ordered = np.zeros((4, 2), dtype="float32")

    coord_sums = pts.sum(axis=1)
    coord_diffs = np.diff(pts, axis=1)

    ordered[0] = pts[np.argmin(coord_sums)]   # top-left
    ordered[1] = pts[np.argmin(coord_diffs)]  # top-right
    ordered[2] = pts[np.argmax(coord_sums)]   # bottom-right
    ordered[3] = pts[np.argmax(coord_diffs)]  # bottom-left

    return ordered



# ----------------------------------

# Perspective transform

# ----------------------------------

def four_point_transform(image, pts):
    """Warp the quadrilateral `pts` in `image` to a flat top-down view."""
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # Output size: the longer of each pair of opposing edges.
    out_w = max(int(np.linalg.norm(br - bl)), int(np.linalg.norm(tr - tl)))
    out_h = max(int(np.linalg.norm(tr - br)), int(np.linalg.norm(tl - bl)))

    dst = np.array(
        [
            [0, 0],
            [out_w - 1, 0],
            [out_w - 1, out_h - 1],
            [0, out_h - 1],
        ],
        dtype="float32",
    )

    transform = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, transform, (out_w, out_h))



# ----------------------------------

# Detect document contour

# ----------------------------------

def detect_document(image):
    """Find the largest 4-corner contour; return its (4, 2) points or None."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 75, 200)

    contours, _ = cv2.findContours(
        edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
    )

    # Largest areas first: the page is usually the dominant shape.
    for contour in sorted(contours, key=cv2.contourArea, reverse=True):
        perimeter = cv2.arcLength(contour, True)
        corners = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
        if len(corners) == 4:
            return corners.reshape(4, 2)

    # No quadrilateral contour found.
    return None



# ----------------------------------

# Main

# ----------------------------------

if __name__ == "__main__":
    # Load, find the page outline, warp it flat, and binarize.
    path = input("Enter image path: ").strip()
    image = cv2.imread(path)

    if image is None:
        print(" Could not load image.")
        exit()

    orig = image.copy()
    doc_cnt = detect_document(image)

    if doc_cnt is None:
        print(" Document edges not detected.")
        exit()

    warped = four_point_transform(orig, doc_cnt)

    # Adaptive threshold gives the high-contrast "scanned" look.
    warped_gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped_thresh = cv2.adaptiveThreshold(
        warped_gray,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY,
        11,
        2,
    )

    cv2.imshow("Original", orig)
    cv2.imshow("Scanned Output", warped_thresh)

    cv2.imwrite("scanned_output.jpg", warped_thresh)
    print(" Saved as scanned_output.jpg")

    cv2.waitKey(0)
    cv2.destroyAllWindows()


# Sudoku Solver with Step Explanation

import copy


# ---------------------------------

# Helper Functions

# ---------------------------------


def print_board(board):
    """Pretty-print a 9x9 grid with "|" and dashed 3x3 box separators."""
    for r in range(9):
        cells = []
        for c in range(9):
            cells.append(str(board[r][c]) + " ")
            if c in (2, 5):
                cells.append("| ")
        print("".join(cells))
        if r in (2, 5):
            print("-" * 21)
    print()



def find_empty(board):
    """Return (row, col) of the first 0 cell in reading order, or None."""
    for r, row in enumerate(board):
        for c, value in enumerate(row):
            if value == 0:
                return r, c
    return None



def is_valid(board, num, row, col):
    """True if placing `num` at (row, col) breaks no row/column/box rule."""
    # Row clash.
    if num in board[row]:
        return False

    # Column clash.
    if any(board[r][col] == num for r in range(9)):
        return False

    # 3x3 box clash.
    top, left = 3 * (row // 3), 3 * (col // 3)
    for r in range(top, top + 3):
        if num in board[r][left:left + 3]:
            return False

    return True



# ---------------------------------

# Constraint Propagation

# ---------------------------------


def get_candidates(board, row, col):
    """Return every digit 1-9 that is currently legal at (row, col)."""
    legal = []
    for digit in range(1, 10):
        if is_valid(board, digit, row, col):
            legal.append(digit)
    return legal



def apply_constraint_propagation(board, steps):
    """Repeatedly fill cells that have exactly one candidate ("naked singles").

    Mutates `board` in place, appending a description of every placement
    to `steps`.  Loops until a full pass makes no progress, then returns
    the board.
    """
    progress = True
    while progress:
        progress = False
        for r in range(9):
            for c in range(9):
                if board[r][c] != 0:
                    continue
                options = get_candidates(board, r, c)
                if len(options) == 1:
                    board[r][c] = options[0]
                    steps.append(
                        f"Naked Single: Placed {options[0]} at ({r+1},{c+1})"
                    )
                    progress = True
    return board



# ---------------------------------

# Backtracking with Explanation

# ---------------------------------


def solve(board, steps):
    """Solve `board` in place via propagation + backtracking; True on success.

    Bug fix: apply_constraint_propagation fills cells in place, but the
    original code never undid those placements when a branch failed —
    digits propagated under a wrong guess leaked back into the caller's
    board and corrupted the backtracking state.  We snapshot the board
    before propagating and restore it on failure.
    """
    snapshot = [row[:] for row in board]
    board = apply_constraint_propagation(board, steps)

    empty = find_empty(board)
    if not empty:
        return True  # no empty cell left: solved

    row, col = empty
    for num in get_candidates(board, row, col):
        steps.append(f"Trying {num} at ({row+1},{col+1})")
        board[row][col] = num

        if solve(board, steps):
            return True

        steps.append(f"Backtracking from ({row+1},{col+1})")
        board[row][col] = 0

    # Dead end: undo everything this call changed, including any cells
    # filled by constraint propagation above.
    for r in range(9):
        board[r][:] = snapshot[r]
    return False



# ---------------------------------

# Example Puzzle

# ---------------------------------


if __name__ == "__main__":
    # Classic example puzzle; 0 marks an empty cell.
    puzzle = [
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9],
    ]

    print("Initial Puzzle:\n")
    print_board(puzzle)

    # Solve a deep copy so the original puzzle stays intact.
    steps = []
    working = copy.deepcopy(puzzle)

    if not solve(working, steps):
        print("No solution found.")
    else:
        print("Solved Puzzle:\n")
        print_board(working)

        print("Step-by-Step Explanation:\n")
        for step in steps:
            print(step)


# Real-Time CPU Thermal Visualizer

import psutil

import numpy as np

import matplotlib.pyplot as plt

import matplotlib.animation as animation

import random


# -----------------------------

# CONFIG

# -----------------------------

GRID_SIZE = 4   # heat map is GRID_SIZE x GRID_SIZE cells
UPDATE_INTERVAL = 1000  # animation refresh period, in milliseconds



# -----------------------------

# Get CPU Temperature

# -----------------------------

def get_cpu_temp():
    """Best-effort read of the CPU temperature in degrees Celsius.

    Returns:
        The first matching sensor reading, or None when the platform
        exposes no usable temperature sensor.
    """
    # psutil.sensors_temperatures() only exists on some platforms
    # (notably Linux); on Windows/macOS the attribute is missing and the
    # original code crashed with AttributeError.
    try:
        temps = psutil.sensors_temperatures()
    except AttributeError:
        return None

    for name, entries in temps.items():
        for entry in entries:
            # Fall back to the sensor group name when the per-entry
            # label is empty, and also accept per-core sensors
            # (e.g. coretemp labels "Core 0", "Core 1", ...).
            label = (entry.label or name).lower()
            if "cpu" in label or "package" in label or "core" in label:
                return entry.current

    return None



# -----------------------------

# Generate Heat Map Data

# -----------------------------

def generate_heat_data():
    """Build a GRID_SIZE x GRID_SIZE temperature grid around the CPU temp.

    Falls back to a simulated 40-70 C base when no sensor is readable,
    then adds Gaussian jitter per cell to mimic per-core variation.
    """
    base_temp = get_cpu_temp()
    if base_temp is None:
        # No readable sensor: simulate a plausible CPU temperature.
        base_temp = random.uniform(40, 70)

    return np.random.normal(
        loc=base_temp, scale=2.0, size=(GRID_SIZE, GRID_SIZE)
    )



# -----------------------------

# Visualization Setup

# -----------------------------

# Build the figure once up front; the animation only swaps the image data.
fig, ax = plt.subplots()
heat_data = generate_heat_data()

# Fixed color scale (30-90 C) so colors stay comparable across frames.
heatmap = ax.imshow(heat_data, cmap="inferno", vmin=30, vmax=90)

cbar = plt.colorbar(heatmap)
cbar.set_label("CPU Temperature (°C)")

ax.set_title("Real-Time CPU Thermal Heat Map")



# -----------------------------

# Animation Update Function

# -----------------------------

def update(frame):
    """FuncAnimation callback: refresh the grid and the average-temp title."""
    grid = generate_heat_data()
    heatmap.set_data(grid)

    ax.set_title(
        f"Real-Time CPU Thermal Heat Map | Avg Temp: {np.mean(grid):.2f}°C"
    )

    # Returned artists are what blitting would redraw.
    return [heatmap]



# -----------------------------

# Run Animation

# -----------------------------

# Keep a module-level reference to the animation so it is not
# garbage-collected while the window is open.
ani = animation.FuncAnimation(fig, update, interval=UPDATE_INTERVAL)

plt.show()


# Cognitive Load Tracker

import time

import threading

import psutil

import pandas as pd

import matplotlib.pyplot as plt

from pynput import keyboard, mouse

import win32gui


# -------------------------------

# GLOBAL METRICS

# -------------------------------

# Activity counters, reset every INTERVAL by the logging thread.
# Incremented from the pynput listener callbacks and the window-polling
# thread.  NOTE(review): they are read and reset without a lock, so
# increments landing between read and reset are lost — confirm that this
# best-effort counting is acceptable.
keystrokes = 0

mouse_moves = 0

window_switches = 0


# Title of the currently focused window (None until the first poll).
current_window = None

# One dict per logging interval; turned into a DataFrame at the end.
data_log = []


TRACK_DURATION = 120  # seconds (change if needed)

INTERVAL = 5          # log every 5 seconds



# -------------------------------

# Keyboard Listener

# -------------------------------

def on_key_press(key):
    """pynput callback: count a key press (the key identity is ignored)."""
    global keystrokes
    keystrokes = keystrokes + 1



# -------------------------------

# Mouse Listener

# -------------------------------

def on_move(x, y):
    """pynput callback: count a mouse movement event (position ignored)."""
    global mouse_moves
    mouse_moves = mouse_moves + 1



# -------------------------------

# Window Change Detection

# -------------------------------

def track_active_window():
    """Poll the foreground window once a second and count focus changes.

    Runs forever; intended to run on a daemon thread.  Updates the
    module-level `current_window` title and increments `window_switches`
    each time the focused window's title changes.
    """
    global current_window, window_switches

    while True:
        # Window lookups can fail transiently (e.g. mid focus-change),
        # so stay best-effort — but catch Exception, not a bare except,
        # which also swallowed KeyboardInterrupt/SystemExit.
        try:
            handle = win32gui.GetForegroundWindow()
            title = win32gui.GetWindowText(handle)

            if current_window and title != current_window:
                window_switches += 1

            current_window = title
        except Exception:
            pass

        time.sleep(1)



# -------------------------------

# Logging Thread

# -------------------------------

def log_metrics():
    """Sample and reset the activity counters every INTERVAL seconds.

    Runs on the main thread for TRACK_DURATION seconds total, appending
    one snapshot dict per interval to `data_log`, then hands off to
    visualize_results() for the plot and CSV report.
    """
    global keystrokes, mouse_moves, window_switches


    start_time = time.time()


    while time.time() - start_time < TRACK_DURATION:

        time.sleep(INTERVAL)


        # Score this interval's activity before the counters are reset.
        fatigue_score = calculate_fatigue(

            keystrokes, mouse_moves, window_switches

        )


        data_log.append({

            "time": time.time() - start_time,

            "keystrokes": keystrokes,

            "mouse_moves": mouse_moves,

            "window_switches": window_switches,

            "fatigue_score": fatigue_score

        })


        print(f"Logged: KS={keystrokes}, MM={mouse_moves}, WS={window_switches}, Fatigue={fatigue_score:.2f}")


        # Reset for the next interval.  NOTE(review): listener threads may
        # increment between the read above and these resets, dropping a few
        # events — confirm best-effort counting is acceptable here.
        keystrokes = 0

        mouse_moves = 0

        window_switches = 0


    print("\nTracking Complete.")

    visualize_results()



# -------------------------------

# Fatigue Calculation Logic

# -------------------------------

def calculate_fatigue(ks, mm, ws):
    """Heuristic cognitive-load score in [0, 100] for one interval.

    Signals and weights:
      - fewer than 20 keystrokes   -> +30 (low typing speed ~ fatigue)
      - more than 5 window switches -> +40 (distraction)
      - more than 500 mouse moves  -> +20 (erratic movement ~ overload)
    """
    score = 0

    if ks < 20:
        score += 30
    if ws > 5:
        score += 40
    if mm > 500:
        score += 20

    # Clamp to the 0-100 scale.
    return min(score, 100)



# -------------------------------

# Visualization

# -------------------------------

def visualize_results():
    """Plot fatigue over time and write the per-interval log to CSV."""
    report = pd.DataFrame(data_log)

    plt.figure(figsize=(10, 5))
    plt.plot(report["time"], report["fatigue_score"], marker="o")
    plt.xlabel("Time (seconds)")
    plt.ylabel("Estimated Cognitive Load")
    plt.title("Cognitive Load Over Time")
    plt.grid(True)
    plt.show()

    report.to_csv("cognitive_load_report.csv", index=False)
    print(" Report saved as cognitive_load_report.csv")



# -------------------------------

# MAIN

# -------------------------------

if __name__ == "__main__":
    print("Cognitive Load Tracker Started")
    print(f"Tracking for {TRACK_DURATION} seconds...\n")

    # pynput listeners run on their own internal threads.
    keyboard_listener = keyboard.Listener(on_press=on_key_press)
    mouse_listener = mouse.Listener(on_move=on_move)
    keyboard_listener.start()
    mouse_listener.start()

    # Daemon thread so window polling dies with the main thread.
    threading.Thread(target=track_active_window, daemon=True).start()

    # Blocks for TRACK_DURATION seconds, then reports.
    log_metrics()