Use ONLY when creating NEW registrable components in ML projects that require Factory/Registry patterns.

✅ USE when:

- Creating a new Dataset class (needs `@register_dataset`)
- Creating a new Model class (needs `@register_model`)
- Creating a new module directory with an `__init__.py` factory
- Initializing a new ML project structure from scratch
- Adding new component types (Augmentation, CollateFunction, Metrics)

❌ DO NOT USE when:

- Modifying existing functions or methods
- Fixing bugs in existing code
- Adding helper functions or utilities
- Refactoring without adding new registrable components
- Making simple changes to a single file
- Modifying configuration files
- Reading or understanding existing code

Key indicator: does the task require a `@register_*` decorator or the Factory pattern? If not, skip this skill.
## Installation

```sh
npx skill4agent add galaxy-dawn/claude-scholar architecture-design
```

## Factory Pattern

```python
# Example from data_module/dataset/__init__.py
from typing import Dict

DATASET_FACTORY: Dict = {}
def DatasetFactory(data_name: str):
    dataset = DATASET_FACTORY.get(data_name, None)
    if dataset is None:
        print(f"{data_name} dataset is not implemented, falling back to the simple dataset")
        dataset = DATASET_FACTORY.get('simple')
    return dataset
```

See `references/factory_pattern.md` for details.
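The factory dict is filled in by a companion `register_dataset` decorator. Its exact definition lives in the reference files; a minimal sketch of the usual shape, assuming the names from the example above:

```python
# Minimal sketch of the registration decorator -- an assumed shape,
# see references/registry_pattern.md for the skill's actual version.
def register_dataset(name: str):
    def wrapper(cls):
        DATASET_FACTORY[name] = cls  # map the string key to the class object
        return cls  # return the class unchanged, so the decorator is transparent
    return wrapper
```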
@register_dataset("simple")
class SimpleDataset(Dataset):
def __init__(self, data):
self.data = datareferences/registry_pattern.md# Example from data_module/dataset/__init__.py
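Together, the two halves give name-based lookup: registering a class stores it in the dict, and the factory retrieves it. A short usage sketch (the toy `data` payload is hypothetical):

```python
# Resolve the registered class by its string key, then instantiate it.
dataset_cls = DatasetFactory("simple")  # returns the SimpleDataset class
dataset = dataset_cls(data=[1, 2, 3])   # hypothetical toy payload
```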
## Auto-Import

```python
# Example from data_module/dataset/__init__.py
import os

models_dir = os.path.dirname(__file__)
import_modules(models_dir, "src.data_module.dataset")
```

See `references/auto_import.md` for details.
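`import_modules` imports every module in the directory so that each file's `@register_*` decorator runs and populates the factory dict. The skill ships its own helper; a hedged sketch of the typical `pkgutil`-based implementation:

```python
# Rough sketch of an auto-import helper -- an assumed implementation,
# not the skill's verbatim code (see references/auto_import.md).
import importlib
import pkgutil


def import_modules(directory: str, package: str) -> None:
    for _, module_name, _ in pkgutil.iter_modules([directory]):
        # Importing the module executes its @register_* decorators,
        # which populate the factory dict as a side effect.
        importlib.import_module(f"{package}.{module_name}")
```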
## Project Structure

```
project/
├── run/
│   ├── pipeline/            # Main workflow scripts
│   │   ├── training/        # Training pipelines
│   │   ├── prepare_data/    # Data preparation pipelines
│   │   └── analysis/        # Analysis pipelines
│   └── conf/                # Hydra configuration files
│       ├── training/        # Training configs
│       ├── dataset/         # Dataset configs
│       ├── model/           # Model configs
│       ├── prepare_data/    # Data prep configs
│       └── analysis/        # Analysis configs
│
├── src/
│   ├── data_module/         # Data processing module
│   │   ├── dataset/         # Dataset implementations
│   │   ├── augmentation/    # Data augmentation
│   │   ├── collate_fn/      # Collate functions
│   │   ├── compute_metrics/ # Metrics computation
│   │   ├── prepare_data/    # Data preparation logic
│   │   ├── data_func/       # Data utility functions
│   │   └── utils.py         # Module-specific utilities
│   │
│   ├── model_module/        # Model implementations
│   │   ├── brain_decoder/   # Brain decoder models
│   │   └── model/           # Alternative model location
│   │
│   ├── trainer_module/      # Training logic
│   ├── analysis_module/     # Analysis and evaluation
│   ├── llm/                 # LLM-related code
│   └── utils/               # Shared utilities
│
├── data/
│   ├── raw/                 # Original, immutable data
│   ├── processed/           # Cleaned, transformed data
│   └── external/            # Third-party data
│
├── outputs/
│   ├── logs/                # Training and evaluation logs
│   ├── checkpoints/         # Model checkpoints
│   ├── tables/              # Result tables
│   └── figures/             # Plots and visualizations
│
├── pyproject.toml           # Project configuration
├── uv.lock                  # Dependency lock file
├── TODO.md                  # Task tracking
├── README.md                # Project documentation
└── .gitignore               # Git ignore rules
```

See `references/structure.md` for details.

## Creating a New Dataset

Place the file in `src/data_module/dataset/`, decorate the class with `@register_dataset("name")`, inherit from `torch.utils.data.Dataset`, and implement `__init__`, `__len__`, and `__getitem__`:

```python
from torch.utils.data import Dataset
from typing import Dict
import torch
from src.data_module.dataset import register_dataset
@register_dataset("custom")
class CustomDataset(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, i: int) -> Dict[str, torch.Tensor]:
return self.data[i]src/model_module/model/@register_model('ModelName')__init__cfgforward(){"loss": loss, "labels": labels, "logits": logits}self.trainingfrom src.model_module.brain_decoder import register_model
@register_model('MyModel')
class MyModel(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        self.task = cfg.dataset.task
        # ALL parameters come from cfg -- never hard-code them
        self.hidden_dim = cfg.model.hidden_dim
        self.output_dim = cfg.dataset.target_size[cfg.dataset.task]
        # Example layers so forward() below runs; real models define their own architecture
        self.head = nn.Linear(self.hidden_dim, self.output_dim)
        self.loss_fn = nn.CrossEntropyLoss()
    def forward(self, x, labels=None, **kwargs):
        logits = self.head(x)
        loss = None
        if self.training:
            # Training: labels are expected, so always compute the loss
            loss = self.loss_fn(logits, labels)
        elif labels is not None:
            # Inference: compute the loss only when labels are supplied (e.g. validation)
            loss = self.loss_fn(logits, labels)
return {"loss": loss, "labels": labels, "logits": logits}src/data_module/augmentation/references/code_style.md__init__.pyrun/conf/references/structure.mdreferences/factory_pattern.mdreferences/registry_pattern.mdreferences/auto_import.mdreferences/code_style.mdexamples/examples/custom_dataset.pyexamples/custom_model.pyexamples/augmentation_example.pyexamples/config_example.yamlexamples/pipeline_example.sh