# Transformers
This skill should be used when working with pre-trained transformer models for natural language processing, computer vision, audio, or multimodal tasks. Use for text generation, classification, question answering, translation, summarization, image classification, object detection, speech recognition, and fine-tuning models on custom datasets.
npx skill4agent add davila7/claude-code-templates transformers

uv pip install torch transformers datasets evaluate accelerate
uv pip install timm pillow
uv pip install librosa soundfile

from huggingface_hub import login
login()  # Follow prompts to enter token

export HUGGINGFACE_TOKEN="your_token_here"

from transformers import pipeline
# Text generation.
# Use max_new_tokens (counts only generated tokens) rather than the deprecated
# max_length (which counts prompt + generated tokens) — this also matches the
# model.generate() example later in this document.
generator = pipeline("text-generation", model="gpt2")
result = generator("The future of AI is", max_new_tokens=50)
# Text classification (falls back to a default sentiment model when none is given).
classifier = pipeline("text-classification")
result = classifier("This movie was excellent!")
# Question answering: extracts an answer span from a supplied context string.
qa = pipeline("question-answering")
result = qa(question="What is AI?", context="AI is artificial intelligence...")

Reference files: references/pipelines.md, references/models.md, references/generation.md, references/training.md, references/tokenizers.md

pipe = pipeline("task-name", model="model-id")
output = pipe(input_data)

from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the tokenizer and model; device_map="auto" lets accelerate place the
# model on available GPUs (or CPU).
tokenizer = AutoTokenizer.from_pretrained("model-id")
model = AutoModelForCausalLM.from_pretrained("model-id", device_map="auto")
# Move the tokenized tensors to the model's device: with device_map="auto" the
# model may live on GPU while tokenizer output defaults to CPU, which would
# make generate() fail with a device mismatch.
inputs = tokenizer("text", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=100)
result = tokenizer.decode(outputs[0])

from transformers import Trainer, TrainingArguments
# Configure the fine-tuning run; see references/training.md for more options.
training_args = TrainingArguments(
# Directory where checkpoints and the final model are written.
output_dir="./results",
num_train_epochs=3,
# Batch size per device; effective batch size scales with the device count.
per_device_train_batch_size=8,
)
# Trainer wires the model, arguments, and dataset into a training loop.
# NOTE(review): assumes `model` and `train_dataset` are defined earlier in the
# document (e.g. via the loading examples above) — confirm before running.
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
trainer.train()

Reference files: references/pipelines.md, references/models.md, references/generation.md, references/training.md, references/tokenizers.md