Loading...
Loading...
Designs and implements state transition analysis systems for tracking time spent in different states. Use when analyzing workflows with state changes (Jira, GitHub PRs, deployments, support tickets, etc.). Covers state machine fundamentals, temporal calculations, bottleneck detection, and business metrics. Trigger keywords: "state analysis", "duration tracking", "workflow metrics", "bottleneck", "cycle time", "state transitions", "time in status", "how long", "state duration", "workflow performance", "state machine", "changelog analysis", "SLA tracking", "process metrics".
npx skill4agent add dawiddutoit/custom-claude design-jira-state-analyzer

from jira_tool.analysis.state_analyzer import StateDurationAnalyzer
from jira_tool.client import JiraClient
# Fetch issue with changelog
client = JiraClient()
issue = client.get_issue("PROJ-123", expand=["changelog"])
# Analyze state durations
analyzer = StateDurationAnalyzer()
durations = analyzer.analyze_issue(issue)
# Get results
for duration in durations:
    print(f"{duration.state}: {duration.calendar_days} days, {duration.business_hours} business hours")

# Export issues with changelog
uv run jira-tool search "project = PROJ" --expand changelog --format json -o issues.json
# Analyze state durations
uv run jira-tool analyze state-durations issues.json -o durations.csv --business-hours

Created → To Do → In Progress → Review → Done
Created → In Review → Approved → Merged
Queued → Building → Testing → Staging → Production
New → Assigned → Investigating → Resolved → Closed

from dataclasses import dataclass
from datetime import datetime
@dataclass
class StateTransition:
    """A single state change parsed from a changelog/audit history."""
    timestamp: datetime  # When it changed
    from_state: str | None  # Previous state (None if created)
    to_state: str  # New state
    author: str | None  # Who made the change

{
"created": "2024-01-15T10:30:00Z",
"changelog": {
"histories": [
{
"created": "2024-01-15T10:30:00Z",
"items": [
{
"field": "status",
"fromString": null,
"toString": "To Do"
}
]
},
{
"created": "2024-01-16T09:00:00Z",
"items": [
{
"field": "status",
"fromString": "To Do",
"toString": "In Progress"
}
]
}
]
}
}

from datetime import datetime, timedelta, UTC
@dataclass
class StateDuration:
state: str
start_time: datetime
end_time: datetime | None # None if still in state
calendar_days: float
business_hours: float
def calculate_calendar_days(start: datetime, end: datetime) -> float:
    """Return the wall-clock time elapsed from *start* to *end*, in days.

    Fractional days are preserved (e.g. 36 hours -> 1.5).
    """
    return (end - start) / timedelta(days=1)
def calculate_business_hours(
    start: datetime,
    end: datetime,
    business_start: int = 9,  # 9 AM
    business_end: int = 17,  # 5 PM
) -> float:
    """Calculate time within business hours (Mon-Fri, 9-5).

    Walks the interval one calendar day at a time, clamps each weekday's
    [business_start, business_end) window to [start, end], and sums the
    overlap in hours.

    NOTE(review): naive datetimes appear to be assumed; DST shifts and
    holidays are not accounted for — confirm acceptable for reporting.
    `day_start`/`day_end` inherit `start`'s microsecond on the first day
    (sub-second effect only).
    """
    current = start
    hours = 0.0
    while current < end:
        # Only count weekdays
        if current.weekday() < 5:  # Mon=0, Fri=4
            day_start = current.replace(hour=business_start, minute=0, second=0)
            day_end = current.replace(hour=business_end, minute=0, second=0)
            # Clamp to actual interval
            interval_start = max(current, day_start)
            interval_end = min(end, day_end)
            if interval_start < interval_end:
                hours += (interval_end - interval_start).total_seconds() / 3600
        # Move to next day (midnight, so later days count the full window)
        current = (current + timedelta(days=1)).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
    return hours

end_time = None

def find_bottlenecks(durations: list[StateDuration]) -> dict[str, dict]:
    """Identify states where items spend most time.

    Aggregates closed intervals (end_time set) per state and returns the
    per-state stats ordered by average duration, slowest state first.
    Open intervals are ignored so in-flight items don't skew averages.
    """
    by_state = {}
    for duration in durations:
        if duration.state not in by_state:
            by_state[duration.state] = {
                'total_days': 0,
                'count': 0,
                'max_days': 0,
                # NOTE(review): initialized but never updated below —
                # confirm whether business-hours averaging was intended.
                'avg_business_hours': 0
            }
        stats = by_state[duration.state]
        if duration.end_time:  # Only closed items
            stats['total_days'] += duration.calendar_days
            stats['count'] += 1
            stats['max_days'] = max(stats['max_days'], duration.calendar_days)
    # Calculate averages (states with no closed items get no 'avg_days' key)
    for state, stats in by_state.items():
        if stats['count'] > 0:
            stats['avg_days'] = stats['total_days'] / stats['count']
    # Sort by average duration, descending; missing 'avg_days' sorts as 0
    return dict(sorted(
        by_state.items(),
        key=lambda x: x[1].get('avg_days', 0),
        reverse=True
    ))

Cycle Time = sum of all state durations
(Useful for: capacity planning, delivery promises)

Lead Time = cycle time minus waiting states
(Useful for: customer SLA tracking)

Flow Efficiency = (active work time) / (total time)
(Useful for: process optimization)

Review Wait Time = average time in "In Review" state
Development Time = average time in "In Progress" state

issue_key,state,start_time,end_time,calendar_days,business_hours
PROJ-123,To Do,2024-01-15T10:30:00Z,2024-01-16T09:00:00Z,0.94,8.5
PROJ-123,In Progress,2024-01-16T09:00:00Z,2024-01-18T14:30:00Z,2.23,16.5

{
"issues": [
{
"key": "PROJ-123",
"states": [
{
"state": "To Do",
"calendar_days": 0.94,
"business_hours": 8.5
}
],
"cycle_time_days": 4.17
}
],
"summary": {
"by_state": {
"In Progress": {
"avg_days": 2.8,
"max_days": 8.5
}
}
}
}

State Analysis Summary:
========================
Total Items Analyzed: 47
Average Cycle Time: 5.2 days (35.4 business hours)
By State:
In Progress: avg 2.8 days (18% of time)
Code Review: avg 1.9 days (23% of time) ⚠️ BOTTLENECK
Testing: avg 0.8 days (10% of time)
Done: avg 0.7 days (8% of time)

from datetime import datetime, UTC
from dataclasses import dataclass
@dataclass
class YourStateTransition:
    """Template: one state change in your system; adapt fields to your audit log."""
    timestamp: datetime
    from_state: str | None
    to_state: str
    # Add domain-specific fields:
    # user_id: str
    # reason: str
    # severity: int
class YourStateAnalyzer:
    """Template analyzer: implement each stage for your system's audit-log format."""
    def extract_transitions(self, audit_log: dict) -> list[YourStateTransition]:
        """Extract transitions from your system's format."""
        # Implement for your audit log structure
        pass
    def calculate_durations(self, transitions: list[YourStateTransition]) -> list[StateDuration]:
        """Calculate time in each state."""
        # Implement calculation logic
        pass
    def find_bottlenecks(self, durations: list[StateDuration]) -> dict:
        """Identify slow states."""
        # Implement bottleneck detection
        pass
    def format_report(self, durations: list[StateDuration]) -> str:
        """Export results."""
        # Implement report generation
        pass

pull_requests.timeline

# Get issue with changelog
uv run jira-tool get PROJ-123
# Or analyze via CLI (shorter version)
uv run jira-tool search "key = PROJ-123" --expand changelog --format json -o issue.json
uv run jira-tool analyze state-durations issue.json -o durations.csv

# Export all issues from last sprint with changelog
uv run jira-tool search "sprint in openSprints()" \
--expand changelog \
--format json \
-o sprint_issues.json
# Analyze state durations
uv run jira-tool analyze state-durations sprint_issues.json \
-o sprint_analysis.csv \
--business-hours
# Load CSV and find bottlenecks
python3 << 'EOF'
import pandas as pd
df = pd.read_csv('sprint_analysis.csv')
# Group by state and calculate averages
bottlenecks = df.groupby('state').agg({
'calendar_days': 'mean',
'business_hours': 'mean'
}).sort_values('calendar_days', ascending=False)
print("Bottleneck States (by average duration):")
print(bottlenecks)
EOF

# Analyze customer-facing issues
uv run jira-tool search "type = Bug AND labels = urgent" \
--expand changelog \
--format json \
-o urgent_bugs.json
uv run jira-tool analyze state-durations urgent_bugs.json \
-o bug_analysis.csv \
--date-from 2024-01-01 --date-to 2024-12-31
# Check if average resolution time meets SLA (< 24 business hours)
python3 << 'EOF'
import pandas as pd
df = pd.read_csv('bug_analysis.csv')
total_hours = df[df['state'] == 'Resolved']['business_hours'].sum()
avg_hours = df[df['state'] == 'Resolved']['business_hours'].mean()
sla_threshold = 24
compliance = (avg_hours <= sla_threshold) * 100 if avg_hours > 0 else 0
print(f"Average Resolution Time: {avg_hours:.1f} business hours")
print(f"SLA Threshold: {sla_threshold} hours")
print(f"Compliance: {compliance:.0f}%")
EOF

# analyze_github_prs.py
import json
from datetime import datetime, UTC
from dataclasses import dataclass
@dataclass
class PRStateTransition:
timestamp: datetime
from_state: str | None
to_state: str
class GitHubPRAnalyzer:
"""Analyze GitHub PR time in review."""
def extract_transitions(self, pr_data: dict):
"""Extract state transitions from GitHub PR timeline."""
transitions = []
# PR created = initial state
created_at = datetime.fromisoformat(pr_data['created_at'].replace('Z', '+00:00'))
transitions.append(PRStateTransition(
timestamp=created_at,
from_state=None,
to_state='Draft' if pr_data['draft'] else 'Open'
))
# Parse review states from timeline
for event in pr_data.get('timeline', []):
if event['event'] == 'review_requested':
transitions.append(PRStateTransition(
timestamp=datetime.fromisoformat(event['created_at'].replace('Z', '+00:00')),
from_state='Open',
to_state='In Review'
))
elif event['event'] == 'pull_request_review':
transitions.append(PRStateTransition(
timestamp=datetime.fromisoformat(event['submitted_at'].replace('Z', '+00:00')),
from_state='In Review',
to_state=f"Review-{event['state']}" # APPROVED, CHANGES_REQUESTED, COMMENTED
))
elif event['event'] == 'merged':
transitions.append(PRStateTransition(
timestamp=datetime.fromisoformat(event['created_at'].replace('Z', '+00:00')),
from_state=transitions[-1].to_state,
to_state='Merged'
))
return transitions
# Usage
analyzer = GitHubPRAnalyzer()
pr_data = json.load(open('pr.json'))
transitions = analyzer.extract_transitions(pr_data)
for t in transitions:
    print(f"{t.from_state} → {t.to_state} at {t.timestamp}")

JIRA_BASE_URL
JIRA_USERNAME
JIRA_API_TOKEN
uv sync
--expand changelog
--expand changelog