Skip to content

Instructor classification with fields

The instructor project has a nice API to try to force structured output out of an LLM. The basic "hello world" for a classifier with it can be built via:

# /// script
# dependencies = [
#   "openai", "python-dotenv", "pydantic", "instructor"
# ]
# ///
from dotenv import load_dotenv
from openai import OpenAI
import instructor
from pydantic import BaseModel, Field


# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

class TextCat(BaseModel):
    """
    You are an expert text classification model.
    """
    # NOTE: the docstring above is phrased as an LLM prompt — instructor
    # includes the model's schema (docstring included) in the request, so
    # do not rewrite it as ordinary documentation.
    # Two independent boolean labels, i.e. multi-label classification.
    new_dataset: bool
    synthetic: bool


# Patch the OpenAI client so chat.completions.create accepts a
# response_model and returns a validated pydantic object.
client = instructor.from_openai(OpenAI())


def classify(data: str) -> TextCat:
    """Classify the input text into the TextCat labels.

    This is multi-label classification: each boolean field of TextCat is
    predicted independently by the LLM.

    Args:
        data: The text (e.g. a paper abstract) to classify.

    Returns:
        A validated TextCat instance.
    """
    return client.chat.completions.create(
        # gpt-3.5-turbo-0613 was shut down by OpenAI in June 2024 and the
        # call would now fail; gpt-4o-mini is OpenAI's recommended
        # replacement for gpt-3.5-turbo-class workloads.
        model="gpt-4o-mini",
        response_model=TextCat,
        messages=[
            {
                "role": "user",
                "content": f"Classify the following text: {data}",
            },
        ],
    )  # type: ignore


if __name__ == "__main__":
    # Example abstract (scraped text; spacing artifacts intentionally kept).
    data = "The diffusion model has shown exceptional capabilities in controlled image generation, which has further fueled interest in image style transfer.Existing works mainly focus on training free-based methods (e.g., image inversion) due to the scarcity of specific data.In this study, we present a data construction pipeline for content-style-stylized image triplets that generates and automatically cleanses stylized data triplets.Based on this pipeline, we construct a dataset IMAGStyle, the first large-scale style transfer dataset containing 210k image triplets, available for the community to explore and research. 0.894Equipped with IMAGStyle, we propose CSGO, a style transfer model based on end-to-end training, which explicitly decouples content and style features employing independent feature injection.The unified CSGO implements image-driven style transfer, text-driven stylized synthesis, and text editing-driven stylized synthesis.Extensive experiments demonstrate the effectiveness of our approach in enhancing style control capabilities in image generation."
    # model_dump() is the canonical pydantic-v2 serialization API
    # (clearer than relying on dict() + BaseModel.__iter__).
    print(classify(data).model_dump())

But here is an improved version.

# /// script
# dependencies = [
#   "openai", "python-dotenv", "pydantic", "instructor"
# ]
# ///
from dotenv import load_dotenv
from openai import OpenAI
import instructor
from pydantic import BaseModel, Field


# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

class TextCat(BaseModel):
    """
    You are an expert text classification model.
    """
    # NOTE: the docstring above and the Field descriptions below are sent
    # to the LLM as part of the schema — they are prompt text, not ordinary
    # documentation, so their wording affects classification behavior.
    # Two independent boolean labels, i.e. multi-label classification.
    new_dataset: bool = Field(description="Indication that the abstract introduces a new dataset. It must be novel, not just a reference to a pre-existing benchmark.")
    synthetic: bool = Field(description="Indication that the abstract discusses synthetic data.")


# Patch the OpenAI client so chat.completions.create accepts a
# response_model and returns a validated pydantic object.
client = instructor.from_openai(OpenAI())


def classify(data: str) -> TextCat:
    """Classify the input text into the TextCat labels.

    This is multi-label classification: each boolean field of TextCat is
    predicted independently by the LLM.

    Args:
        data: The text (e.g. a paper abstract) to classify.

    Returns:
        A validated TextCat instance.
    """
    return client.chat.completions.create(
        # gpt-3.5-turbo-0613 was shut down by OpenAI in June 2024 and the
        # call would now fail; gpt-4o-mini is OpenAI's recommended
        # replacement for gpt-3.5-turbo-class workloads.
        model="gpt-4o-mini",
        response_model=TextCat,
        messages=[
            {
                "role": "user",
                "content": f"Classify the following text: {data}",
            },
        ],
    )  # type: ignore


if __name__ == "__main__":
    # Example abstract (scraped text; spacing artifacts intentionally kept).
    data = "The diffusion model has shown exceptional capabilities in controlled image generation, which has further fueled interest in image style transfer.Existing works mainly focus on training free-based methods (e.g., image inversion) due to the scarcity of specific data.In this study, we present a data construction pipeline for content-style-stylized image triplets that generates and automatically cleanses stylized data triplets.Based on this pipeline, we construct a dataset IMAGStyle, the first large-scale style transfer dataset containing 210k image triplets, available for the community to explore and research. 0.894Equipped with IMAGStyle, we propose CSGO, a style transfer model based on end-to-end training, which explicitly decouples content and style features employing independent feature injection.The unified CSGO implements image-driven style transfer, text-driven stylized synthesis, and text editing-driven stylized synthesis.Extensive experiments demonstrate the effectiveness of our approach in enhancing style control capabilities in image generation."
    # model_dump() is the canonical pydantic-v2 serialization API
    # (clearer than relying on dict() + BaseModel.__iter__).
    print(classify(data).model_dump())

Spot the improvement? It's the Field that's been added. It allows you to attach context by describing each label, which should help steer the LLM a whole lot.

Groq

Figured I might also add a working script for the Groq API.

# /// script
# dependencies = [
#   "groq", "python-dotenv", "pydantic", "instructor"
# ]
# ///
import os
from pydantic import BaseModel, Field
from typing import List
from groq import Groq
import instructor
from dotenv import load_dotenv

# Load environment variables (e.g. GROQ_API_KEY) from a local .env file.
load_dotenv()


class TextCat(BaseModel):
    """
    You are an expert text classification model.
    """
    # NOTE: the docstring above and the Field descriptions below are sent
    # to the LLM as part of the schema — they are prompt text, not ordinary
    # documentation, so their wording affects classification behavior.
    # Two independent boolean labels, i.e. multi-label classification.
    new_dataset: bool = Field(description="Indication that the abstract introduces a new dataset. It must be novel, not just a reference to a pre-existing benchmark.")
    synthetic: bool = Field(description="Indication that the abstract discusses synthetic data.")


# Raw Groq client; the key comes from the environment populated by load_dotenv().
client = Groq(
    api_key=os.environ.get('GROQ_API_KEY'),
)

# Patch the client so chat.completions.create accepts a response_model.
# NOTE(review): Mode.JSON makes instructor request and parse raw JSON output —
# presumably chosen for Groq compatibility; confirm against the instructor docs.
client = instructor.from_groq(client, mode=instructor.Mode.JSON)

def classify(data: str) -> TextCat:
    """Classify the input text into the TextCat labels.

    This is multi-label classification: each boolean field of TextCat is
    predicted independently by the LLM.

    Args:
        data: The text (e.g. a paper abstract) to classify.

    Returns:
        A validated TextCat instance.
    """
    return client.chat.completions.create(
        # NOTE(review): llama3-70b-8192 may have been retired by Groq since
        # this was written — verify against Groq's deprecations page and
        # switch to a currently-served model if the call fails.
        model="llama3-70b-8192",
        response_model=TextCat,
        messages=[
            {
                "role": "user",
                "content": f"Classify the following text: {data}",
            },
        ],
    )  # type: ignore


if __name__ == "__main__":
    # Example abstract (scraped text; spacing artifacts intentionally kept).
    data = "The diffusion model has shown exceptional capabilities in controlled image generation, which has further fueled interest in image style transfer.Existing works mainly focus on training free-based methods (e.g., image inversion) due to the scarcity of specific data.In this study, we present a data construction pipeline for content-style-stylized image triplets that generates and automatically cleanses stylized data triplets.Based on this pipeline, we construct a dataset IMAGStyle, the first large-scale style transfer dataset containing 210k image triplets, available for the community to explore and research. 0.894Equipped with IMAGStyle, we propose CSGO, a style transfer model based on end-to-end training, which explicitly decouples content and style features employing independent feature injection.The unified CSGO implements image-driven style transfer, text-driven stylized synthesis, and text editing-driven stylized synthesis.Extensive experiments demonstrate the effectiveness of our approach in enhancing style control capabilities in image generation."
    # model_dump() is the canonical pydantic-v2 serialization API
    # (clearer than relying on dict() + BaseModel.__iter__).
    print(classify(data).model_dump())

If you want to see the prompt/response, remember that you can always turn on logging:

import logging

logging.basicConfig(level=logging.DEBUG)