Network Speed Monitor

 pip install speedtest-cli matplotlib
 (tkinter ships with the standard Python installer; the PyPI "tk" package is unrelated and not needed)


import threading
import time
import tkinter as tk
from tkinter import ttk

import matplotlib.pyplot as plt
import speedtest
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

# Function to test internet speed
def test_speed():
    st = speedtest.Speedtest()
    st.get_best_server()
    
    download_speed = round(st.download() / 1_000_000, 2)  # Convert to Mbps
    upload_speed = round(st.upload() / 1_000_000, 2)  # Convert to Mbps

    return download_speed, upload_speed

# Function to update the speed in GUI
def update_speed():
    global speeds_download, speeds_upload
    
    while True:
        download, upload = test_speed()
        speeds_download.append(download)
        speeds_upload.append(upload)

        lbl_download.config(text=f"Download Speed: {download} Mbps")
        lbl_upload.config(text=f"Upload Speed: {upload} Mbps")

        update_graph()
        root.update_idletasks()

# Function to update the speed graph
def update_graph():
    ax.clear()
    ax.plot(speeds_download, label="Download Speed (Mbps)", color="blue")
    ax.plot(speeds_upload, label="Upload Speed (Mbps)", color="red")
    
    ax.set_title("Network Speed Over Time")
    ax.set_xlabel("Test Count")
    ax.set_ylabel("Speed (Mbps)")
    ax.legend()
    ax.grid()

    canvas.draw()

# Tkinter GUI setup
root = tk.Tk()
root.title("Network Speed Monitor 🌐")
root.geometry("500x500")

lbl_title = tk.Label(root, text="šŸ“” Network Speed Monitor", font=("Arial", 14, "bold"))
lbl_title.pack(pady=10)

lbl_download = tk.Label(root, text="Download Speed: -- Mbps", font=("Arial", 12))
lbl_download.pack(pady=5)

lbl_upload = tk.Label(root, text="Upload Speed: -- Mbps", font=("Arial", 12))
lbl_upload.pack(pady=5)

# Matplotlib graph setup
fig, ax = plt.subplots(figsize=(5, 3))
speeds_download = []
speeds_upload = []

canvas = FigureCanvasTkAgg(fig, master=root)
canvas.get_tk_widget().pack()

# Run speed test in a separate thread
threading.Thread(target=update_speed, daemon=True).start()

root.mainloop()

Code Syntax Highlighter

 Requires: pip install pygments


from pygments import highlight
from pygments.lexers import guess_lexer, PythonLexer, JavascriptLexer, CLexer
from pygments.formatters import TerminalFormatter, HtmlFormatter

# Function to highlight code in terminal
def highlight_code_terminal(code, language="python"):
    # Choose lexer based on language
    lexer = {
        "python": PythonLexer(),
        "javascript": JavascriptLexer(),
        "c": CLexer(),
    }.get(language.lower(), guess_lexer(code))

    highlighted_code = highlight(code, lexer, TerminalFormatter())
    return highlighted_code


# Function to highlight code and save as HTML
def highlight_code_html(code, language="python", output_file="highlighted_code.html"):
    lexer = {
        "python": PythonLexer(),
        "javascript": JavascriptLexer(),
        "c": CLexer(),
    }.get(language.lower(), guess_lexer(code))

    formatter = HtmlFormatter(full=True, style="monokai")
    highlighted_code = highlight(code, lexer, formatter)

    # Save to an HTML file
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(highlighted_code)
    
    print(f"✅ Highlighted code saved to {output_file}")

from pygments import highlight
from pygments.lexers import guess_lexer, PythonLexer, JavascriptLexer, CLexer
from pygments.formatters import TerminalFormatter, HtmlFormatter

# Function to highlight code in terminal
def highlight_code_terminal(code, language="python"):
    # Choose lexer based on language
    lexer = {
        "python": PythonLexer(),
        "javascript": JavascriptLexer(),
        "c": CLexer(),
    }.get(language.lower(), guess_lexer(code))

    highlighted_code = highlight(code, lexer, TerminalFormatter())
    return highlighted_code


# Function to highlight code and save as HTML
def highlight_code_html(code, language="python", output_file="highlighted_code.html"):
    lexer = {
        "python": PythonLexer(),
        "javascript": JavascriptLexer(),
        "c": CLexer(),
    }.get(language.lower(), guess_lexer(code))

    formatter = HtmlFormatter(full=True, style="monokai")
    highlighted_code = highlight(code, lexer, formatter)

    # Save to an HTML file
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(highlighted_code)
    
    print(f"✅ Highlighted code saved to {output_file}")


# Example usage
if __name__ == "__main__":
    code_sample = """
    def greet(name):
        print(f"Hello, {name}!")
    
    greet("abc")
    """

    print("šŸŽØ Terminal Highlighted Code:")
    print(highlight_code_terminal(code_sample, "python"))

    highlight_code_html(code_sample, "python")


Resume Parser & Analyzer


pip install spacy pdfminer.six python-docx pandas nltk

python -m spacy download en_core_web_sm


import re

import spacy

import pdfminer.high_level

import docx

import nltk

from collections import Counter


nltk.download("stopwords")

from nltk.corpus import stopwords


# Load spaCy NLP model

nlp = spacy.load("en_core_web_sm")



# Function to extract text from PDF

def extract_text_from_pdf(pdf_path):

    return pdfminer.high_level.extract_text(pdf_path)



# Function to extract text from DOCX

def extract_text_from_docx(docx_path):

    doc = docx.Document(docx_path)

    return "\n".join([para.text for para in doc.paragraphs])



# Function to extract email from text

def extract_email(text):

    email_pattern = r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}"

    emails = re.findall(email_pattern, text)

    return emails[0] if emails else None



# Function to extract phone number from text

def extract_phone(text):

    phone_pattern = r"\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}"

    phones = re.findall(phone_pattern, text)

    return phones[0] if phones else None



# Function to extract skills from text

def extract_skills(text):

    skills_list = ["Python", "Java", "C++", "Machine Learning", "Data Science", "SQL", "Django", "React", "Flask"]

    found_skills = [skill for skill in skills_list if skill.lower() in text.lower()]

    return found_skills



# Function to extract name using NLP

def extract_name(text):

    doc = nlp(text)

    for ent in doc.ents:

        if ent.label_ == "PERSON":

            return ent.text

    return None



# Function to match skills with a job description

def match_skills(resume_skills, job_description):

    job_tokens = nltk.word_tokenize(job_description.lower())

    stop_words = set(stopwords.words("english"))

    filtered_job_tokens = [word for word in job_tokens if word not in stop_words]


    skill_match_count = sum(1 for skill in resume_skills if skill.lower() in filtered_job_tokens)

    match_percentage = (skill_match_count / len(resume_skills)) * 100 if resume_skills else 0

    return round(match_percentage, 2)



# Main function

def analyze_resume(file_path, job_description):

    # Extract text

    text = extract_text_from_pdf(file_path) if file_path.endswith(".pdf") else extract_text_from_docx(file_path)


    # Extract details

    name = extract_name(text)

    email = extract_email(text)

    phone = extract_phone(text)

    skills = extract_skills(text)

    match_percentage = match_skills(skills, job_description)


    # Display results

    print("\nšŸ“„ Resume Analysis Results:")

    print(f"šŸ‘¤ Name: {name}")

    print(f"šŸ“§ Email: {email}")

    print(f"šŸ“ž Phone: {phone}")

    print(f"šŸ›  Skills: {', '.join(skills)}")

    print(f"✅ Skill Match with Job: {match_percentage}%")


    return {"name": name, "email": email, "phone": phone, "skills": skills, "match_percentage": match_percentage}


Automated File Organizer

 import os

import shutil


# Define file categories with their extensions

FILE_CATEGORIES = {

    "Images": [".jpg", ".jpeg", ".png", ".gif", ".bmp", ".svg"],

    "Documents": [".pdf", ".docx", ".txt", ".xlsx", ".pptx", ".csv"],

    "Videos": [".mp4", ".avi", ".mkv", ".mov"],

    "Music": [".mp3", ".wav", ".flac"],

    "Archives": [".zip", ".rar", ".tar", ".gz"],

    "Programs": [".exe", ".msi", ".dmg"],

    "Others": []

}


def organize_files(folder_path):

    """Organizes files into categorized folders based on extensions."""

    

    if not os.path.exists(folder_path):

        print(f"Error: The folder '{folder_path}' does not exist.")

        return

    

    # Create folders if they don't exist

    for category in FILE_CATEGORIES.keys():

        category_path = os.path.join(folder_path, category)

        os.makedirs(category_path, exist_ok=True)


    # Iterate through all files in the folder

    for file_name in os.listdir(folder_path):

        file_path = os.path.join(folder_path, file_name)

        

        # Skip directories

        if os.path.isdir(file_path):

            continue

        

        # Get file extension

        file_ext = os.path.splitext(file_name)[1].lower()


        # Determine the correct category for the file

        destination_folder = "Others"  # Default category

        for category, extensions in FILE_CATEGORIES.items():

            if file_ext in extensions:

                destination_folder = category

                break


        # Move the file to the correct folder

        shutil.move(file_path, os.path.join(folder_path, destination_folder, file_name))

        print(f"Moved: {file_name} → {destination_folder}")


    print("✅ File organization completed successfully!")


# Run the script

if __name__ == "__main__":

    folder_to_organize = input("Enter the folder path to organize: ")

    organize_files(folder_to_organize)


Face Attendance System

 import cv2

import numpy as np

import face_recognition

import os

import csv

from datetime import datetime


# Folder containing images of known faces

KNOWN_FACES_DIR = "known_faces"

ATTENDANCE_FILE = "attendance.csv"


# Load known face encodings and names

known_encodings = []

known_names = []


for filename in os.listdir(KNOWN_FACES_DIR):

    image_path = os.path.join(KNOWN_FACES_DIR, filename)

    image = face_recognition.load_image_file(image_path)

    encoding = face_recognition.face_encodings(image)[0]

    known_encodings.append(encoding)

    known_names.append(os.path.splitext(filename)[0])  # Remove file extension


# Initialize webcam

video_capture = cv2.VideoCapture(0)


# Function to mark attendance

def mark_attendance(name):

    with open(ATTENDANCE_FILE, "a", newline="") as file:

        writer = csv.writer(file)

        now = datetime.now()

        time_str = now.strftime("%H:%M:%S")

        date_str = now.strftime("%Y-%m-%d")

        writer.writerow([name, date_str, time_str])


# Create CSV file with headers if it doesn't exist

if not os.path.exists(ATTENDANCE_FILE):

    with open(ATTENDANCE_FILE, "w", newline="") as file:

        writer = csv.writer(file)

        writer.writerow(["Name", "Date", "Time"])


# Process video stream

while True:

    ret, frame = video_capture.read()

    if not ret:

        break


    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)  # Optimize performance

    rgb_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)


    # Detect faces in the frame

    face_locations = face_recognition.face_locations(rgb_frame)

    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)


    for encoding, location in zip(face_encodings, face_locations):

        matches = face_recognition.compare_faces(known_encodings, encoding)

        name = "Unknown"


        if True in matches:

            index = matches.index(True)

            name = known_names[index]

            mark_attendance(name)  # Mark attendance in CSV


        # Draw bounding box around detected face

        top, right, bottom, left = [v * 4 for v in location]

        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)

        cv2.putText(frame, name, (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)


    cv2.imshow("Face Attendance System", frame)


    if cv2.waitKey(1) & 0xFF == ord("q"):

        break


video_capture.release()

cv2.destroyAllWindows()