Expert-level AWS cloud architecture, services, security, cost optimization, and best practices
npx skill4agent add personamanagmentlayer/pcl aws-expert

# Launch EC2 instance
aws ec2 run-instances \
--image-id ami-0c55b159cbfafe1f0 \
--instance-type t3.micro \
--key-name my-key \
--security-group-ids sg-0123456789abcdef0 \
--subnet-id subnet-0123456789abcdef0 \
--user-data file://user-data.sh \
--tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value=WebServer}]'
# List instances
aws ec2 describe-instances \
--filters "Name=tag:Environment,Values=production" \
--query 'Reservations[*].Instances[*].[InstanceId,State.Name,PrivateIpAddress]' \
--output table
# Start/Stop instances
aws ec2 start-instances --instance-ids i-1234567890abcdef0
aws ec2 stop-instances --instance-ids i-1234567890abcdef0
# Create AMI
aws ec2 create-image \
--instance-id i-1234567890abcdef0 \
--name "WebServer-Backup-$(date +%Y%m%d)" \
--description "Backup of web server"
# User data script (user-data.sh, referenced by --user-data above)
#!/bin/bash
yum update -y
yum install -y docker
systemctl start docker
systemctl enable docker
docker run -d -p 80:80 nginx

# lambda_function.py
import json

def lambda_handler(event, context):
    # Parse input
    body = json.loads(event.get('body', '{}'))
    name = body.get('name', 'World')

    # Process
    message = f"Hello, {name}!"

    # Return response
    return {
        'statusCode': 200,
        'headers': {
            'Content-Type': 'application/json',
            'Access-Control-Allow-Origin': '*'
        },
        'body': json.dumps({'message': message})
    }
# Create Lambda function
aws lambda create-function \
--function-name my-function \
--runtime python3.11 \
--role arn:aws:iam::123456789012:role/lambda-role \
--handler lambda_function.lambda_handler \
--zip-file fileb://function.zip \
--timeout 30 \
--memory-size 256 \
--environment 'Variables={ENV=production,DB_HOST=mydb.example.com}'
# Invoke Lambda (AWS CLI v2 needs --cli-binary-format for a JSON text payload)
aws lambda invoke \
--function-name my-function \
--cli-binary-format raw-in-base64-out \
--payload '{"name": "Alice"}' \
response.json
# Update function code
aws lambda update-function-code \
--function-name my-function \
--zip-file fileb://function.zip

// task-definition.json
{
  "family": "web-app",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "256",
  "memory": "512",
  "executionRoleArn": "arn:aws:iam::123456789012:role/ecsTaskExecutionRole",
  "containerDefinitions": [
    {
      "name": "web",
      "image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/web-app:latest",
      "portMappings": [
        {
          "containerPort": 80,
          "protocol": "tcp"
        }
      ],
      "environment": [
        {"name": "ENV", "value": "production"},
        {"name": "PORT", "value": "80"}
      ],
      "secrets": [
        {
          "name": "DB_PASSWORD",
          "valueFrom": "arn:aws:secretsmanager:us-east-1:123456789012:secret:db-password"
        }
      ],
      "logConfiguration": {
        "logDriver": "awslogs",
        "options": {
          "awslogs-group": "/ecs/web-app",
          "awslogs-region": "us-east-1",
          "awslogs-stream-prefix": "ecs"
        }
      }
    }
  ]
}

# Register task definition
aws ecs register-task-definition --cli-input-json file://task-definition.json
# Create ECS service
aws ecs create-service \
--cluster my-cluster \
--service-name web-app \
--task-definition web-app:1 \
--desired-count 3 \
--launch-type FARGATE \
--network-configuration "awsvpcConfiguration={subnets=[subnet-12345],securityGroups=[sg-12345],assignPublicIp=ENABLED}" \
--load-balancers "targetGroupArn=arn:aws:elasticloadbalancing:...,containerName=web,containerPort=80"
# Update service
aws ecs update-service \
--cluster my-cluster \
--service web-app \
--desired-count 5

# Create bucket
aws s3 mb s3://my-bucket --region us-east-1
# Upload file
aws s3 cp file.txt s3://my-bucket/
aws s3 cp folder/ s3://my-bucket/folder/ --recursive
# Download file
aws s3 cp s3://my-bucket/file.txt .
aws s3 sync s3://my-bucket/folder/ ./folder/
# List objects
aws s3 ls s3://my-bucket/
aws s3 ls s3://my-bucket/folder/ --recursive
# Delete objects
aws s3 rm s3://my-bucket/file.txt
aws s3 rm s3://my-bucket/folder/ --recursive
# Set bucket policy
aws s3api put-bucket-policy \
--bucket my-bucket \
--policy file://bucket-policy.json
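The bucket-policy.json referenced above isn't shown in this reference; a minimal sketch granting read access to a single role (app-role is an illustrative placeholder, not from the original):

// bucket-policy.json - read access for one role (app-role is a placeholder)
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {"AWS": "arn:aws:iam::123456789012:role/app-role"},
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::my-bucket/*"
    }
  ]
}
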
# Enable versioning
aws s3api put-bucket-versioning \
--bucket my-bucket \
--versioning-configuration Status=Enabled
# Enable encryption
aws s3api put-bucket-encryption \
--bucket my-bucket \
--server-side-encryption-configuration '{
  "Rules": [{
    "ApplyServerSideEncryptionByDefault": {
      "SSEAlgorithm": "AES256"
    }
  }]
}'
# Lifecycle policy
aws s3api put-bucket-lifecycle-configuration \
--bucket my-bucket \
--lifecycle-configuration file://lifecycle.json

// lifecycle.json
{
  "Rules": [
    {
      "Id": "Move to Glacier after 90 days",
      "Status": "Enabled",
      "Filter": {"Prefix": "logs/"},
      "Transitions": [
        {
          "Days": 90,
          "StorageClass": "GLACIER"
        }
      ],
      "Expiration": {
        "Days": 365
      }
    }
  ]
}

# Create volume
aws ec2 create-volume \
--volume-type gp3 \
--size 100 \
--availability-zone us-east-1a \
--iops 3000 \
--throughput 125
# Attach volume
aws ec2 attach-volume \
--volume-id vol-1234567890abcdef0 \
--instance-id i-1234567890abcdef0 \
--device /dev/sdf
# Create snapshot
aws ec2 create-snapshot \
--volume-id vol-1234567890abcdef0 \
--description "Backup $(date +%Y%m%d)"
# Copy snapshot to another region
aws ec2 copy-snapshot \
--source-region us-east-1 \
--source-snapshot-id snap-1234567890abcdef0 \
--region us-west-2

# Create DB instance
aws rds create-db-instance \
--db-instance-identifier mydb \
--db-instance-class db.t3.micro \
--engine postgres \
--engine-version 15.3 \
--master-username admin \
--master-user-password MySecurePassword123 \
--allocated-storage 20 \
--storage-type gp3 \
--vpc-security-group-ids sg-0123456789abcdef0 \
--db-subnet-group-name my-subnet-group \
--backup-retention-period 7 \
--preferred-backup-window "03:00-04:00" \
--preferred-maintenance-window "mon:04:00-mon:05:00" \
--multi-az \
--storage-encrypted \
--enable-cloudwatch-logs-exports '["postgresql"]'
# Create read replica
aws rds create-db-instance-read-replica \
--db-instance-identifier mydb-replica \
--source-db-instance-identifier mydb \
--db-instance-class db.t3.micro
# Create snapshot
aws rds create-db-snapshot \
--db-instance-identifier mydb \
--db-snapshot-identifier mydb-snapshot-$(date +%Y%m%d)
# Restore from snapshot
aws rds restore-db-instance-from-db-snapshot \
--db-instance-identifier mydb-restored \
--db-snapshot-identifier mydb-snapshot-20240119

import boto3
dynamodb = boto3.resource('dynamodb')
# Create table
table = dynamodb.create_table(
    TableName='Users',
    KeySchema=[
        {'AttributeName': 'userId', 'KeyType': 'HASH'},     # Partition key
        {'AttributeName': 'timestamp', 'KeyType': 'RANGE'}  # Sort key
    ],
    AttributeDefinitions=[
        {'AttributeName': 'userId', 'AttributeType': 'S'},
        {'AttributeName': 'timestamp', 'AttributeType': 'N'},
        {'AttributeName': 'email', 'AttributeType': 'S'}
    ],
    GlobalSecondaryIndexes=[
        {
            'IndexName': 'EmailIndex',
            'KeySchema': [{'AttributeName': 'email', 'KeyType': 'HASH'}],
            'Projection': {'ProjectionType': 'ALL'}
            # No ProvisionedThroughput here: not allowed with PAY_PER_REQUEST
        }
    ],
    BillingMode='PAY_PER_REQUEST'  # Or PROVISIONED (then set throughput on table and GSIs)
)
# Put item
table = dynamodb.Table('Users')
table.put_item(
    Item={
        'userId': 'user123',
        'timestamp': 1234567890,
        'name': 'Alice',
        'email': 'alice@example.com'
    }
)

# Get item
response = table.get_item(Key={'userId': 'user123', 'timestamp': 1234567890})
item = response.get('Item')

# Query
response = table.query(
    KeyConditionExpression='userId = :uid',
    ExpressionAttributeValues={':uid': 'user123'}
)

# Scan (avoid in production - use query instead)
response = table.scan(
    FilterExpression='email = :email',
    ExpressionAttributeValues={':email': 'alice@example.com'}
)
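The scan above reads the whole table to match on email; the EmailIndex GSI defined at table creation serves the same lookup with a query. A minimal sketch, assuming the table and index from this section:

# Query the GSI instead of scanning the whole table
response = table.query(
    IndexName='EmailIndex',
    KeyConditionExpression='email = :email',
    ExpressionAttributeValues={':email': 'alice@example.com'}
)
items = response.get('Items', [])
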
# Create VPC
aws ec2 create-vpc --cidr-block 10.0.0.0/16
# Create subnets
aws ec2 create-subnet \
--vpc-id vpc-1234567890abcdef0 \
--cidr-block 10.0.1.0/24 \
--availability-zone us-east-1a
aws ec2 create-subnet \
--vpc-id vpc-1234567890abcdef0 \
--cidr-block 10.0.2.0/24 \
--availability-zone us-east-1b
# Create internet gateway
aws ec2 create-internet-gateway
aws ec2 attach-internet-gateway \
--vpc-id vpc-1234567890abcdef0 \
--internet-gateway-id igw-1234567890abcdef0
# Create route table
aws ec2 create-route-table --vpc-id vpc-1234567890abcdef0
aws ec2 create-route \
--route-table-id rtb-1234567890abcdef0 \
--destination-cidr-block 0.0.0.0/0 \
--gateway-id igw-1234567890abcdef0
# Associate route table with subnet
aws ec2 associate-route-table \
--subnet-id subnet-1234567890abcdef0 \
--route-table-id rtb-1234567890abcdef0
# Create security group
aws ec2 create-security-group \
--group-name web-sg \
--description "Web server security group" \
--vpc-id vpc-1234567890abcdef0
# Add inbound rules
aws ec2 authorize-security-group-ingress \
--group-id sg-1234567890abcdef0 \
--protocol tcp \
--port 80 \
--cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress \
--group-id sg-1234567890abcdef0 \
--protocol tcp \
--port 443 \
--cidr 0.0.0.0/0

# Create Application Load Balancer
aws elbv2 create-load-balancer \
--name my-alb \
--subnets subnet-12345 subnet-67890 \
--security-groups sg-12345 \
--scheme internet-facing \
--type application \
--ip-address-type ipv4
# Create target group
aws elbv2 create-target-group \
--name my-targets \
--protocol HTTP \
--port 80 \
--vpc-id vpc-12345 \
--health-check-protocol HTTP \
--health-check-path /health \
--health-check-interval-seconds 30 \
--health-check-timeout-seconds 5 \
--healthy-threshold-count 2 \
--unhealthy-threshold-count 3
# Register targets
aws elbv2 register-targets \
--target-group-arn arn:aws:elasticloadbalancing:... \
--targets Id=i-12345 Id=i-67890
# Create listener
aws elbv2 create-listener \
--load-balancer-arn arn:aws:elasticloadbalancing:... \
--protocol HTTP \
--port 80 \
--default-actions Type=forward,TargetGroupArn=arn:aws:elasticloadbalancing:...

// policy.json - S3 read-only policy
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::my-bucket",
        "arn:aws:s3:::my-bucket/*"
      ]
    }
  ]
}

# Create IAM user
aws iam create-user --user-name alice
# Create access key
aws iam create-access-key --user-name alice
# Create policy
aws iam create-policy \
--policy-name S3ReadOnlyPolicy \
--policy-document file://policy.json
# Attach policy to user
aws iam attach-user-policy \
--user-name alice \
--policy-arn arn:aws:iam::123456789012:policy/S3ReadOnlyPolicy
# Create role
aws iam create-role \
--role-name lambda-role \
--assume-role-policy-document file://trust-policy.json
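The trust-policy.json referenced here isn't shown elsewhere in this reference; for a Lambda execution role it allows the Lambda service to assume the role:

// trust-policy.json - let Lambda assume this role
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {"Service": "lambda.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
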
# Attach policy to role
aws iam attach-role-policy \
--role-name lambda-role \
--policy-arn arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole

# Store secret
aws secretsmanager create-secret \
--name db-password \
--description "Database password" \
--secret-string '{"username":"admin","password":"MySecurePassword123"}'
# Retrieve secret
aws secretsmanager get-secret-value --secret-id db-password
# Rotate secret
aws secretsmanager rotate-secret \
--secret-id db-password \
--rotation-lambda-arn arn:aws:lambda:...

# Put metric data
aws cloudwatch put-metric-data \
--namespace MyApp \
--metric-name RequestCount \
--value 100 \
--timestamp $(date -u +"%Y-%m-%dT%H:%M:%S.000Z")
# Create alarm
aws cloudwatch put-metric-alarm \
--alarm-name high-cpu \
--alarm-description "Alert when CPU exceeds 80%" \
--metric-name CPUUtilization \
--namespace AWS/EC2 \
--statistic Average \
--period 300 \
--threshold 80 \
--comparison-operator GreaterThanThreshold \
--evaluation-periods 2 \
--alarm-actions arn:aws:sns:us-east-1:123456789012:my-topic \
--dimensions Name=InstanceId,Value=i-12345
# Query logs
aws logs filter-log-events \
--log-group-name /aws/lambda/my-function \
--start-time $(date -d '1 hour ago' +%s)000 \
--filter-pattern "ERROR"
# Create log group
aws logs create-log-group --log-group-name /aws/my-app
# Set retention
aws logs put-retention-policy \
--log-group-name /aws/my-app \
--retention-in-days 30

# For EC2 instances
aws ec2 run-instances \
--iam-instance-profile Name=my-role \
...
# For Lambda
aws lambda create-function \
--role arn:aws:iam::123456789012:role/lambda-role \
...

# Require MFA for sensitive operations
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Deny",
      "Action": "*",
      "Resource": "*",
      "Condition": {
        "BoolIfExists": {"aws:MultiFactorAuthPresent": "false"}
      }
    }
  ]
}

# Launch resources in private subnets
# Use NAT Gateway for outbound internet access (sketch below)
# Implement least-privilege security groups
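A minimal CLI sketch of the NAT Gateway pattern above; all IDs are placeholders, and it assumes a public subnet whose route table already points at the internet gateway:

# Allocate an Elastic IP, then create the NAT Gateway in a public subnet
aws ec2 allocate-address --domain vpc
aws ec2 create-nat-gateway \
--subnet-id subnet-public12345 \
--allocation-id eipalloc-1234567890abcdef0
# Send the private subnet's outbound traffic through the NAT Gateway
aws ec2 create-route \
--route-table-id rtb-private12345 \
--destination-cidr-block 0.0.0.0/0 \
--nat-gateway-id nat-1234567890abcdef0
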
# S3 encryption
--server-side-encryption AES256
# EBS encryption
--encrypted
# RDS encryption
--storage-encrypted

# S3 versioning
# RDS automated backups
# EBS snapshots
# Cross-region replication
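As one concrete form of cross-region replication, S3 bucket replication can be configured as below; a sketch assuming versioning is already enabled on both buckets and that replication-role (a placeholder) has the required S3 permissions:

aws s3api put-bucket-replication \
--bucket my-bucket \
--replication-configuration '{
  "Role": "arn:aws:iam::123456789012:role/replication-role",
  "Rules": [{
    "Status": "Enabled",
    "Priority": 1,
    "Filter": {"Prefix": ""},
    "DeleteMarkerReplication": {"Status": "Disabled"},
    "Destination": {"Bucket": "arn:aws:s3:::my-bucket-replica"}
  }]
}'
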
# Use Reserved Instances for predictable workloads
# Use Spot Instances for flexible workloads
# Right-size instances
# Use S3 lifecycle policies
# Enable S3 Intelligent-Tiering
# Delete unused resources
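Before right-sizing or deleting resources, Cost Explorer shows where spend actually goes; a minimal sketch breaking down monthly cost by service (dates are placeholders):

aws ce get-cost-and-usage \
--time-period Start=2024-01-01,End=2024-02-01 \
--granularity MONTHLY \
--metrics UnblendedCost \
--group-by Type=DIMENSION,Key=SERVICE
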
# Consistent tagging strategy (e.g. tag an existing instance)
aws ec2 create-tags \
--resources i-1234567890abcdef0 \
--tags Key=Environment,Value=production \
Key=Project,Value=webapp \
Key=CostCenter,Value=engineering