{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "DgpubXociwNK" }, "source": [ "## **Gliese-OCR-7B-Post1.0(4-bit)**" ] }, { "cell_type": "markdown", "metadata": { "id": "Nb3wNhothvX7" }, "source": [ "The Gliese-OCR-7B-Post1.0 model is a fine-tuned version of Camel-Doc-OCR-062825, optimized for Document Retrieval, Content Extraction, and Analysis Recognition. Built on top of the Qwen2.5-VL architecture, this model enhances document comprehension capabilities with focused training on the Opendoc2-Analysis-Recognition dataset for superior document analysis and information extraction tasks.\n", "\n", " > This model shows significant improvements in LaTeX rendering and Markdown rendering for OCR tasks.\n", "\n", "| Image1 | Image2 |\n", "|--------|--------|\n", "|  |  |\n", "\n", "*multimodal model & notebook by: [prithivMLmods](https://huggingface.co/prithivMLmods)*" ] }, { "cell_type": "markdown", "metadata": { "id": "Mk560Wx0j6PY" }, "source": [ "### **Install packages**" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "qTD_dNliNS5T" }, "outputs": [], "source": [ "%%capture\n", "!pip install git+https://github.com/huggingface/transformers.git \\\n", " git+https://github.com/huggingface/accelerate.git \\\n", " git+https://github.com/huggingface/peft.git \\\n", " transformers-stream-generator huggingface_hub albumentations \\\n", " pyvips-binary qwen-vl-utils sentencepiece opencv-python docling-core \\\n", " python-docx torchvision safetensors matplotlib num2words \\\n", "\n", "!pip install xformers requests pymupdf hf_xet spaces pyvips pillow gradio \\\n", " einops torch fpdf timm av decord bitsandbytes reportlab\n", "#Hold tight, this will take around 1-2 minutes." ] }, { "cell_type": "markdown", "metadata": { "id": "uiBblyf-kLmf" }, "source": [ "### **Run Demo App**" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "pgz93DfvNMfb" }, "outputs": [], "source": [ "import spaces\n", "import json\n", "import math\n", "import os\n", "import traceback\n", "from io import BytesIO\n", "from typing import Any, Dict, List, Optional, Tuple\n", "import re\n", "import time\n", "from threading import Thread\n", "from io import BytesIO\n", "import uuid\n", "import tempfile\n", "\n", "import gradio as gr\n", "import requests\n", "import torch\n", "from PIL import Image\n", "import fitz\n", "import numpy as np\n", "\n", "# --- New Model Imports ---\n", "from transformers import (\n", " Qwen2_5_VLForConditionalGeneration,\n", " AutoProcessor,\n", " TextIteratorStreamer,\n", " BitsAndBytesConfig,\n", ")\n", "\n", "from reportlab.lib.pagesizes import A4\n", "from reportlab.lib.styles import getSampleStyleSheet\n", "from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer\n", "from reportlab.lib.units import inch\n", "\n", "# --- Constants and Model Setup ---\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "print(\"CUDA_VISIBLE_DEVICES=\", os.environ.get(\"CUDA_VISIBLE_DEVICES\"))\n", "print(\"torch.__version__ =\", torch.__version__)\n", "print(\"torch.version.cuda =\", torch.version.cuda)\n", "print(\"cuda available:\", torch.cuda.is_available())\n", "print(\"cuda device count:\", torch.cuda.device_count())\n", "if torch.cuda.is_available():\n", " print(\"current device:\", torch.cuda.current_device())\n", " print(\"device name:\", torch.cuda.get_device_name(torch.cuda.current_device()))\n", "\n", "print(\"Using device:\", device)\n", "\n", "\n", "# --- Model Loading (Updated for Qwen2.5-VL) ---\n", "\n", "# Define model options\n", "MODEL_OPTIONS = {\n", " \"Gliese-OCR-7B-Post1.0\": \"prithivMLmods/Gliese-OCR-7B-Post1.0\",\n", "}\n", "\n", "# Define 4-bit quantization configuration\n", "# This config will load the model in 4-bit to save VRAM.\n", "quantization_config = BitsAndBytesConfig(\n", " load_in_4bit=True,\n", " bnb_4bit_compute_dtype=torch.float16,\n", " bnb_4bit_quant_type=\"nf4\",\n", " bnb_4bit_use_double_quant=True,\n", ")\n", "\n", "# Preload models and processors into CUDA\n", "models = {}\n", "processors = {}\n", "for name, model_id in MODEL_OPTIONS.items():\n", " print(f\"Loading {name}๐ค. This will use 4-bit quantization to save VRAM.\")\n", " models[name] = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n", " model_id,\n", " trust_remote_code=True,\n", " quantization_config=quantization_config,\n", " device_map=\"auto\"\n", " )\n", " processors[name] = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)\n", "print(\"Model loaded successfully.\")\n", "\n", "\n", "# --- PDF Generation and Preview Utility Function (Unchanged) ---\n", "def generate_and_preview_pdf(image: Image.Image, text_content: str, font_size: int, line_spacing: float, alignment: str, image_size: str):\n", " \"\"\"\n", " Generates a PDF, saves it, and then creates image previews of its pages.\n", " Returns the path to the PDF and a list of paths to the preview images.\n", " \"\"\"\n", " if image is None or not text_content or not text_content.strip():\n", " raise gr.Error(\"Cannot generate PDF. Image or text content is missing.\")\n", "\n", " # --- 1. Generate the PDF ---\n", " temp_dir = tempfile.gettempdir()\n", " pdf_filename = os.path.join(temp_dir, f\"output_{uuid.uuid4()}.pdf\")\n", " doc = SimpleDocTemplate(\n", " pdf_filename,\n", " pagesize=A4,\n", " rightMargin=inch, leftMargin=inch,\n", " topMargin=inch, bottomMargin=inch\n", " )\n", " styles = getSampleStyleSheet()\n", " style_normal = styles[\"Normal\"]\n", " style_normal.fontSize = int(font_size)\n", " style_normal.leading = int(font_size) * line_spacing\n", " style_normal.alignment = {\"Left\": 0, \"Center\": 1, \"Right\": 2, \"Justified\": 4}[alignment]\n", "\n", " story = []\n", "\n", " img_buffer = BytesIO()\n", " image.save(img_buffer, format='PNG')\n", " img_buffer.seek(0)\n", "\n", " page_width, _ = A4\n", " available_width = page_width - 2 * inch\n", " image_widths = {\n", " \"Small\": available_width * 0.3,\n", " \"Medium\": available_width * 0.6,\n", " \"Large\": available_width * 0.9,\n", " }\n", " img_width = image_widths[image_size]\n", " # Create a ReportLab Image object, handling potential transparency\n", " img = RLImage(img_buffer, width=img_width, height=image.height * (img_width / image.width))\n", " story.append(img)\n", " story.append(Spacer(1, 12))\n", "\n", " # Clean the text for PDF generation\n", " cleaned_text = re.sub(r'#+\\s*', '', text_content).replace(\"*\", \"\")\n", " text_paragraphs = cleaned_text.split('\\n')\n", "\n", " for para in text_paragraphs:\n", " if para.strip():\n", " story.append(Paragraph(para, style_normal))\n", "\n", " doc.build(story)\n", "\n", " # --- 2. Render PDF pages as images for preview ---\n", " preview_images = []\n", " try:\n", " pdf_doc = fitz.open(pdf_filename)\n", " for page_num in range(len(pdf_doc)):\n", " page = pdf_doc.load_page(page_num)\n", " pix = page.get_pixmap(dpi=150)\n", " preview_img_path = os.path.join(temp_dir, f\"preview_{uuid.uuid4()}_p{page_num}.png\")\n", " pix.save(preview_img_path)\n", " preview_images.append(preview_img_path)\n", " pdf_doc.close()\n", " except Exception as e:\n", " print(f\"Error generating PDF preview: {e}\")\n", "\n", " return pdf_filename, preview_images\n", "\n", "\n", "# --- Core Application Logic (Updated for Qwen2.5-VL with Streaming) ---\n", "@spaces.GPU\n", "def process_document(\n", " image: Image.Image,\n", " prompt_input: str,\n", " max_new_tokens: int,\n", " temperature: float,\n", " top_p: float,\n", " top_k: int,\n", " repetition_penalty: float\n", "):\n", " \"\"\"\n", " Main function that handles model inference for the Qwen model with streaming.\n", " This function is a generator, yielding text as it is generated.\n", " \"\"\"\n", " if image is None:\n", " yield \"Please upload an image.\", \"Please upload an image.\"\n", " return\n", " if not prompt_input or not prompt_input.strip():\n", " yield \"Please enter a prompt.\", \"Please enter a prompt.\"\n", " return\n", "\n", " model_name = \"Gliese-OCR-7B-Post1.0\"\n", " model = models[model_name]\n", " processor = processors[model_name]\n", "\n", " messages = [\n", " {\n", " \"role\": \"user\",\n", " \"content\": [\n", " {\"type\": \"image\", \"image\": image},\n", " {\"type\": \"text\", \"text\": prompt_input},\n", " ],\n", " }\n", " ]\n", "\n", " text = processor.apply_chat_template(\n", " messages, tokenize=False, add_generation_prompt=True\n", " )\n", " inputs = processor(\n", " text=[text],\n", " images=[image],\n", " padding=True,\n", " return_tensors=\"pt\",\n", " ).to(\"cuda\")\n", "\n", " streamer = TextIteratorStreamer(\n", " processor.tokenizer, skip_prompt=True, skip_special_tokens=True\n", " )\n", "\n", " generation_kwargs = dict(\n", " inputs,\n", " streamer=streamer,\n", " max_new_tokens=max_new_tokens,\n", " temperature=temperature,\n", " top_p=top_p,\n", " top_k=top_k,\n", " repetition_penalty=repetition_penalty,\n", " do_sample=True if temperature > 0 else False,\n", " )\n", "\n", " thread = Thread(target=model.generate, kwargs=generation_kwargs)\n", " thread.start()\n", "\n", " buffer = \"\"\n", " for new_text in streamer:\n", " buffer += new_text\n", " # Remove special tokens from the output stream\n", " clean_buffer = buffer.replace(\"<|im_end|>\", \"\").replace(\"<|endoftext|>\", \"\")\n", " yield clean_buffer, clean_buffer\n", "\n", "# --- Gradio UI Definition (Updated Title, otherwise unchanged) ---\n", "def create_gradio_interface():\n", " \"\"\"Builds and returns the Gradio web interface.\"\"\"\n", " css = \"\"\"\n", " .main-container { max-width: 1400px; margin: 0 auto; }\n", " .process-button { border: none !important; color: white !important; font-weight: bold !important; background-color: blue !important;}\n", " .process-button:hover { background-color: darkblue !important; transform: translateY(-2px) !important; box-shadow: 0 4px 8px rgba(0,0,0,0.2) !important; }\n", " #gallery { min-height: 400px; }\n", " \"\"\"\n", " with gr.Blocks(theme=\"bethecloud/storj_theme\", css=css) as demo:\n", " gr.HTML(f\"\"\"\n", "
\n", " Image Content Extraction and Markdown Rendering \n", "
\n", "