Loading...
Loading...
MaaNTE is a MAA-based automation assistant for the game Neverness to Everness, powered by MaaFramework, supporting auto-fishing, auto-coffee-making, and cafe revenue extraction.
npx skill4agent add aradotso/trending-skills maante-game-automation

Skill by ara.so — Daily 2026 Skills collection.
https://github.com/1bananachicken/MaaNTE/releases

git clone --recursive https://github.com/<your-username>/MaaNTE.git
cd MaaNTE
pip install -r requirements.txt

Place the MaaFramework release under deps/:

MaaNTE/
└── deps/
    └── MaaFramework/
        ├── bin/
        ├── include/
        └── lib/

MaaNTE/
├── assets/
│ └── logo.png
├── deps/ # MaaFramework binaries (not committed)
├── pipeline/ # JSON pipeline task definitions
│ ├── fishing/
│ ├── coffee/
│ └── cafe/
├── custom/ # Python custom action/recognizer scripts
├── docs/
│ └── README_en.md
├── interface.json # MFAAvalonia GUI configuration
└── main.py # Entry point (dev mode)

{
"TaskName": {
"recognition": "TemplateMatch",
"template": "fishing/float.png",
"roi": [0, 0, 1280, 720],
"action": "Click",
"next": ["NextTask"],
"timeout": 10000,
"on_error": ["ErrorHandlerTask"]
}
}

| Type | Description |
|---|---|
| TemplateMatch | Find image template on screen |
| OCR | Optical character recognition |
| ColorMatch | Match pixel color |
| DirectHit | Always triggers (no recognition) |
| Action | Description |
|---|---|
| Click | Click matched region |
| Swipe | Swipe gesture |
| Key | Press keyboard key |
| StartApp | Launch application |
| StopApp | Stop application |
| Custom | Call Python custom action |
# custom/my_action.py
from maa.agent.agent_server import AgentServer
from maa.custom_action import CustomAction
from maa.context import Context
from maa.define import RectType
import json
class MyCustomAction(CustomAction):
    """Example custom action: reads its task parameters, runs a
    sub-pipeline task, then performs a synchronous click.

    Invoked by the pipeline via ``"action": "Custom"`` with
    ``custom_action`` set to the registered name.
    """

    def run(
        self,
        context: Context,
        argv: CustomAction.RunArg,
    ) -> CustomAction.RunResult:
        # Name of the pipeline task that triggered this action, plus the
        # JSON payload supplied through custom_action_param.
        current_task = argv.task_name
        params = json.loads(argv.custom_action_param)

        # Most recent screenshot cached by the controller.
        screenshot = context.tasker.controller.cached_image

        # Run a sub-pipeline task from inside this action.
        context.run_pipeline("AnotherTask")

        # Click at fixed screen coordinates and wait for completion.
        context.tasker.controller.post_click(640, 360).wait()

        return CustomAction.RunResult(success=True)
# Register and start agent server.
# Entry point when this file is launched as a MaaFramework agent process:
# start the server, register the custom action under the name the pipeline
# refers to, then (below) block in server.join() until shutdown.
if __name__ == "__main__":
    AgentServer.start_up(AgentServer.parse_argv())
    server = AgentServer()
    server.register_custom_action("MyCustomAction", MyCustomAction())
    server.join()

{
"TriggerMyAction": {
"recognition": "DirectHit",
"action": "Custom",
"custom_action": "MyCustomAction",
"custom_action_param": "{\"key\": \"value\"}"
}
}

# custom/my_recognizer.py
from maa.custom_recognizer import CustomRecognizer
from maa.context import Context
import numpy as np
class MyCustomRecognizer(CustomRecognizer):
    """Example recognizer: reports a hit when a fixed screen patch has a
    high mean red channel."""

    def analyze(
        self,
        context: Context,
        argv: CustomRecognizer.AnalyzeArg,
    ) -> CustomRecognizer.AnalyzeResult:
        frame = argv.image  # numpy array (H, W, C), BGR channel order

        # Your image analysis logic here.
        # Example: average colour of the 100x100 patch at (x=600, y=300).
        patch = frame[300:400, 600:700]
        mean_color = np.mean(patch, axis=(0, 1))

        # BGR layout -> index 2 is the red channel.
        if mean_color[2] > 200:
            # Return the bounding box of the found region.
            return CustomRecognizer.AnalyzeResult(
                box=(600, 300, 100, 100),  # x, y, w, h
                detail="found red region"
            )
        return CustomRecognizer.AnalyzeResult(box=None, detail="not found")

# Run with default config
python main.py
# The GUI is provided by MFAAvalonia (separate executable)
# For pipeline-only testing use MaaFramework CLI tools in deps/

interface.json:

{
"name": "MaaNTE",
"version": "1.0.0",
"tasks": [
{
"name": "自动钓鱼",
"entry": "StartFishing",
"option": [
{
"name": "自动卖鱼",
"cases": [
{"name": "开启", "pipeline_override": {"SellFish": {"enabled": true}}},
{"name": "关闭", "pipeline_override": {"SellFish": {"enabled": false}}}
]
}
]
},
{
"name": "自动做咖啡",
"entry": "StartCoffee"
}
],
"controller": [
{
"name": "Win32",
"type": "Win32",
"screencap": "FramePool",
"input": "Seize"
}
]
}

⚠️ Auto-coffee requires `input: "Seize"` — this takes over mouse control while running.
from maa.toolkit import Toolkit
from maa.controller import Win32Controller
# Point MaaFramework at the current directory for its option/config files.
Toolkit.init_option("./")
# Attach to the game window, located by title (window class left empty).
controller = Win32Controller(
hWnd=Toolkit.find_window("", "NTE_WindowTitle")
)
# Block until the controller connection has been established.
controller.post_connection().wait()
# Save screenshot for template
image = controller.cached_image
import cv2
cv2.imwrite("assets/template/my_element.png", image)

{
"DetectFishBite": {
"recognition": "TemplateMatch",
"template": "fishing/fish_bite_indicator.png",
"threshold": 0.85,
"roi": [500, 400, 300, 200],
"action": "Click",
"next": ["RecastLine"],
"timeout": 30000
}
}

# Always branch from dev for new features
git checkout dev
git pull upstream dev
git checkout -b feature/my-new-task
# Add pipeline JSON in pipeline/
# Add any custom Python in custom/
# Update interface.json to expose task in GUI
git add .
git commit -m "feat: add auto-xxx task"
git push origin feature/my-new-task
# Open PR targeting the dev branch

# Debug: lower threshold temporarily
{
"MyTask": {
"recognition": "TemplateMatch",
"template": "my_template.png",
"threshold": 0.7, # default 0.8, lower = more lenient
"roi": [0, 0, 1280, 720]
}
}

from maa.toolkit import Toolkit
# List all available windows so the right hwnd / title can be picked out
# for the Win32 controller configuration.
windows = Toolkit.find_window_list("", "")
for w in windows:
    print(f"hwnd={w.hwnd} class={w.class_name} title={w.window_name}")