YOLO Automation Project Walkthrough (Part 1): Log Formatting, Concurrent Async Multithreading, WebSocket, Looped Screenshots, YOLO Inference, 3D Pathfinding
This series is a set of study notes on automation techniques, built around a technical reading of the open-source Genshin Impact AI project. The repository is linked below; the version pulled for this walkthrough is 1.78.
https://gitee.com/LanRenZhiNeng/lanren-genshin-impact-ai
Part 1: Key functions at the main entry point (main.py)
vi main.py
main.py wraps quite a few helper functions, so we pick out this snippet to discuss first. The code below does three things:
1. Find the game window handle
2. Check for a newer version of the tool
3. Obtain administrator privileges
if __name__ == '__main__':
try:
if ctypes.windll.shell32.IsUserAnAdmin():
#游戏句柄查找
hwnd_cmd = find_child_window_handle(0, "ConsoleWindowClass", "YuanShenAi")
if hwnd_cmd == 0:
hwnd_cmd = find_child_window_handle(0, "ConsoleWindowClass", "懒人原神")
#当前软件版本更新
vardata = get_var_data()
if vardata != False:
updatetxt = get_updatetxt()
print(updatetxt)
print("当前版本号:" + state.版本号)
if float(vardata[1]) > float(state.版本号):
print(
"如果更新失败手动下更新包手动覆盖: https://gitee.com/LanRenZhiNeng/lanren-genshin-impact-ai/releases")
var = input(f"发现更新的版本 v{vardata[1]} 是否需要更新? (输入yes or no 默认更新)\n")
if var == "":
var = "yes"
if var.lower() != "no":
update_process = subprocess.Popen(f'update.exe "{vardata[0]}.exe" "{vardata[2]}"',
creationflags=subprocess.CREATE_NEW_CONSOLE)
time.sleep(1)
sys.exit()
# 运行需要管理员权限的代码
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), 128)
os.environ['PNG_IGNORE_WARNINGS'] = '1'
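The excerpt only checks IsUserAnAdmin(); the branch that relaunches the script with elevated rights is outside this snippet. For reference, a minimal sketch of the usual Windows elevation pattern (an assumption on my part, not the project's exact code) looks like this:
import ctypes
import sys

# Sketch: if we are not elevated yet, ask UAC to restart the same script as administrator.
if not ctypes.windll.shell32.IsUserAnAdmin():
    params = " ".join(f'"{arg}"' for arg in sys.argv)
    # "runas" triggers the UAC prompt and starts a new, elevated interpreter on the same script.
    ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, params, None, 1)
    sys.exit()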
Below is the code for the supporting modules used above, with notes.
1. Child-window handle lookup and mouse input helpers
vi lanrentools/moni.py
# -*- coding: utf-8 -*-
import ctypes
from ctypes import wintypes
import win32con
import win32gui
# 获取当前屏幕宽度和高度
screen_width = ctypes.windll.user32.GetSystemMetrics(0)
screen_height = ctypes.windll.user32.GetSystemMetrics(1)
# 定义MOUSEINPUT结构体,用于描述鼠标输入事件
class MOUSEINPUT(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long), # 鼠标X轴移动距离
("dy", ctypes.c_long), # 鼠标Y轴移动距离
("mouseData", ctypes.c_ulong), # 鼠标的额外数据,通常用于滚轮
("dwFlags", ctypes.c_ulong), # 鼠标事件标志位
("time", ctypes.c_ulong), # 事件发生的时间
("dwExtraInfo", ctypes.POINTER(ctypes.c_ulong))] # 额外信息指针
# 定义INPUT结构体,用于封装各种类型的输入事件
class INPUT(ctypes.Structure):
class _INPUT(ctypes.Union):
_fields_ = [("mi", MOUSEINPUT)] # 鼠标输入
_fields_ = [("type", ctypes.c_ulong), # 输入事件类型
("input", _INPUT)] # 输入事件的具体内容
# 鼠标相对移动函数
def mouse_moveR(dx, dy):
# 构造输入事件列表
inputs = []
inputs.append(INPUT(type=0, input=INPUT._INPUT(
mi=MOUSEINPUT(dx=dx, dy=dy, mouseData=0, dwFlags=win32con.MOUSEEVENTF_MOVE, time=0, dwExtraInfo=None))))
# 发送输入事件
ctypes.windll.user32.SendInput(len(inputs), ctypes.byref(inputs[0]), ctypes.sizeof(INPUT))
# 鼠标绝对移动函数
def mouse_move(x, y):
# 计算目标位置的绝对坐标值
absolute_x = int(x / screen_width * 65535)
absolute_y = int(y / screen_height * 65535)
# 构造输入事件列表
inputs = []
inputs.append(INPUT(type=0, input=INPUT._INPUT(
mi=MOUSEINPUT(dx=absolute_x, dy=absolute_y, mouseData=0,
dwFlags=win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE,
time=0, dwExtraInfo=None))))
# 发送输入事件
ctypes.windll.user32.SendInput(len(inputs), ctypes.byref(inputs[0]), ctypes.sizeof(INPUT))
# 鼠标左键按下
def mouse_left_down():
# 构造输入事件列表
inputs = []
inputs.append(INPUT(type=0, input=INPUT._INPUT(
mi=MOUSEINPUT(dx=0, dy=0, mouseData=0, dwFlags=win32con.MOUSEEVENTF_LEFTDOWN, time=0, dwExtraInfo=None))))
# 发送输入事件
ctypes.windll.user32.SendInput(len(inputs), ctypes.byref(inputs[0]), ctypes.sizeof(INPUT))
# 鼠标左键弹起
def mouse_left_up():
# 构造输入事件列表
inputs = []
inputs.append(INPUT(type=0, input=INPUT._INPUT(
mi=MOUSEINPUT(dx=0, dy=0, mouseData=0, dwFlags=win32con.MOUSEEVENTF_LEFTUP, time=0, dwExtraInfo=None))))
# 发送输入事件
ctypes.windll.user32.SendInput(len(inputs), ctypes.byref(inputs[0]), ctypes.sizeof(INPUT))
# 鼠标右键按下
def mouse_right_down():
# 构造输入事件列表
inputs = []
inputs.append(INPUT(type=0, input=INPUT._INPUT(
mi=MOUSEINPUT(dx=0, dy=0, mouseData=0, dwFlags=win32con.MOUSEEVENTF_RIGHTDOWN, time=0, dwExtraInfo=None))))
# 发送输入事件
ctypes.windll.user32.SendInput(len(inputs), ctypes.byref(inputs[0]), ctypes.sizeof(INPUT))
# 鼠标右键弹起
def mouse_right_up():
# 构造输入事件列表
inputs = []
inputs.append(INPUT(type=0, input=INPUT._INPUT(
mi=MOUSEINPUT(dx=0, dy=0, mouseData=0, dwFlags=win32con.MOUSEEVENTF_RIGHTUP, time=0, dwExtraInfo=None))))
# 发送输入事件
ctypes.windll.user32.SendInput(len(inputs), ctypes.byref(inputs[0]), ctypes.sizeof(INPUT))
# 鼠标中键按下
def mouse_middle_down():
# 构造输入事件列表
inputs = []
inputs.append(INPUT(type=0, input=INPUT._INPUT(
mi=MOUSEINPUT(dx=0, dy=0, mouseData=0, dwFlags=win32con.MOUSEEVENTF_MIDDLEDOWN, time=0, dwExtraInfo=None))))
# 发送输入事件
ctypes.windll.user32.SendInput(len(inputs), ctypes.byref(inputs[0]), ctypes.sizeof(INPUT))
# 鼠标中键弹起
def mouse_middle_up():
# 构造输入事件列表
inputs = []
inputs.append(INPUT(type=0, input=INPUT._INPUT(
mi=MOUSEINPUT(dx=0, dy=0, mouseData=0, dwFlags=win32con.MOUSEEVENTF_MIDDLEUP, time=0, dwExtraInfo=None))))
# 发送输入事件
ctypes.windll.user32.SendInput(len(inputs), ctypes.byref(inputs[0]), ctypes.sizeof(INPUT))
# 原始窗口样式存储变量
original_style = 0
# 设置窗口鼠标穿透
def set_mouse_through(hwnd, setPenetrate=True):
global original_style
if setPenetrate:
style = win32gui.GetWindowLong(hwnd, win32con.GWL_EXSTYLE)
original_style = style # 保存原始样式,以便恢复
new_style = style | win32con.WS_EX_TRANSPARENT | win32con.WS_EX_LAYERED
win32gui.SetWindowLong(hwnd, win32con.GWL_EXSTYLE, new_style)
else:
if original_style == 0:
win32gui.SetWindowLong(hwnd, win32con.GWL_EXSTYLE, win32con.WS_EX_LAYERED)
else:
win32gui.SetWindowLong(hwnd, win32con.GWL_EXSTYLE, original_style)
# 强制激活窗口
def set_window_activate(hwnd):
try:
# 判断窗口是否为最小化
if win32gui.IsIconic(hwnd):
# 还原窗口
win32gui.ShowWindow(hwnd, win32con.SW_RESTORE)
# 将窗口置顶
win32gui.SetWindowPos(hwnd, win32con.HWND_TOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE | win32con.SWP_NOSIZE)
# 取消置顶
win32gui.SetWindowPos(hwnd, win32con.HWND_NOTOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE | win32con.SWP_NOSIZE)
win32gui.SetForegroundWindow(hwnd)
except:
pass
# 设置窗口显示关联属性
def set_window_display_affinity(hwnd):
# WDA_NONE - 0x00000000 - 对窗口的显示位置没有限制。
# WDA_MONITOR - 0x00000001 - 窗口内容仅显示在监视器上。 在其他任何位置,窗口都会显示,其中没有任何内容。
# WDA_EXCLUDEFROMCAPTURE - 0x00000011 - 窗口仅显示在监视器上。 在其他位置,窗口根本不显示。
dwAffinity = wintypes.DWORD(0x00000011) # 使用wintypes模块, “0x00000001”可换成其他值
dll = ctypes.cdll.LoadLibrary("C:\\WINDOWS\\system32\\user32.dll")
dll.SetWindowDisplayAffinity(hwnd, dwAffinity)
# 显示或隐藏窗口
def show_hide_window(hwnd, show):
if show:
win32gui.ShowWindow(hwnd, 5) # 5对应SW_SHOW
else:
win32gui.ShowWindow(hwnd, 0) # 0对应SW_HIDE
# 查找子窗口句柄
# hwnd_cmd = find_child_window_handle(0, "ConsoleWindowClass", "YuanShenAi")
def find_child_window_handle(parent, class_name, window_title):
hwnd_child = 0
while True:
# FindWindowEx从给定的父窗口 parent 中查找具有指定类名 class_name 的子窗口句柄
hwnd_child = win32gui.FindWindowEx(parent, hwnd_child, class_name, None)
if hwnd_child:
# 获取子窗口的标题
buffer = win32gui.GetWindowText(hwnd_child)
# 检查窗口标题是否包含指定的字符串
if window_title in buffer:
return hwnd_child # 如果找到了匹配的子窗口,返回它的句柄
else:
return 0 # 如果没有找到任何子窗口,返回0
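A quick usage sketch of these helpers (a hypothetical call site; the window class and title are the ones the project itself searches for later):
import time
import win32gui
from lanrentools.moni import set_window_activate, mouse_move, mouse_left_down, mouse_left_up

hwnd = win32gui.FindWindow("UnityWndClass", "原神")   # the game window
if hwnd:
    set_window_activate(hwnd)    # restore and foreground the window
    mouse_move(960, 540)         # absolute move; scaled to SendInput's 0-65535 range internally
    mouse_left_down()
    time.sleep(0.05)
    mouse_left_up()              # one left click at the screen centre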
2. Version update check
vi jianchaupdate.py
import html
import requests
def get_var_data():
try:
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47"
}
res = requests.get(
"https://note.youdao.com/yws/api/personal/preview/WEB96c82d6b37c9fd60f8b1d146731080d6?method=convert&shareToken=f3fb8b827c2fbcd651073cf7b55a6b3e&engine=nyozo",
headers=headers)
if res.status_code == 200:
return html.unescape(res.text).split("<pre>")[-1].split("</pre>")[0].split("\n")[:-1]
else:
return False
except:
print("请不要开梯子")
return False
def get_updatetxt():
try:
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47"
}
res = requests.get(
"https://note.youdao.com/yws/api/personal/preview/WEB9b9f3f25e3921ae407edf8ba9537b847?method=convert&shareToken=8b937504aee5dcb1f8c62678d7fffdad&engine=nyozo",
headers=headers)
if res.status_code == 200:
return html.unescape(res.text).split("<pre>")[-1].split("</pre>")[0][:-1]
else:
return ""
except:
print("无法获取更新日志,请不要开梯子!")
return ""
if __name__ == '__main__':
print(get_var_data())
print(get_updatetxt())
3. Global variables
vi state.py
# -*- coding: utf-8 -*-
import configparser
import json
import os
import sys
# 创建 ConfigParser 对象
config = configparser.ConfigParser()
# 加载 INI 文件
config.read("./datas/setting.ini")
GAME_TITLE=config.get( 'seting','GAME_TITLE', fallback='原神')
LIANZHAO=config.get( 'seting', 'LIANZHAO',fallback='阵容1草神2久岐忍3钟离4雷神.txt')
PATH_TASK=config.get( 'seting','PATH_TASK', fallback='./datas/Task/')
PATH_JIAOBEN=config.get( 'seting', 'PATH_JIAOBEN',fallback='./datas/JiaoBen/')
PACKS_TASK=config.get( 'seting', 'PACKS_TASK',fallback='./datas/Task/')
PATH_JUESE=config.get( 'seting', 'PATH_JUESE',fallback='./datas/img/juese/')
PATH_ADDRESS=config.get( 'seting', 'PATH_ADDRESS',fallback='./datas/img/address/')
WEIGHTS=config.get( 'seting','WEIGHTS', fallback='./datas/yolov5s_320.onnx')
IMGSIZE_WIDTH=int(config.get( 'seting','IMGSIZE_WIDTH', fallback='320'))
IMGSIZE_HEIGHT=int(config.get( 'seting','IMGSIZE_HEIGHT', fallback='320'))
PROVIDERS=config.get( 'seting','PROVIDERS', fallback="""["CUDAExecutionProvider", "CPUExecutionProvider"]""")
PROVIDERS=json.loads(PROVIDERS.replace("'",'"'))
LIANZHAOFUWU=config.get( 'seting','LIANZHAOFUWU', fallback='./datas/jiaoben/躺宝连招插件.exe')
DUANGKOUHAO=config.get( 'seting','DUANGKOUHAO', fallback='29943')
WINDOW_WIDTH=int(config.get( 'seting','WINDOW_WIDTH', fallback="640"))
WINDOW_HEIGHT=int(config.get('seting','WINDOW_HEIGHT', fallback="900"))
WINDOW_LEFT=int(config.get( 'seting', 'WINDOW_LEFT',fallback="0"))
WINDOW_TOP=int(config.get( 'seting','WINDOW_TOP', fallback="300"))
ON_SHUTDOWN=int(config.get( 'seting','ON_SHUTDOWN', fallback="0"))
ON_JIXING=int(config.get( 'seting','ON_JIXING', fallback="0"))
ON_NEXTPACK=int(config.get( 'seting','ON_NEXTPACK', fallback="0"))
ON_LIANZHAOBUJIANCE=int(config.get( 'seting','ON_LIANZHAOBUJIANCE', fallback="0"))
TIMEOUT_DAGUAI=int(config.get( 'seting', 'TIMEOUT_DAGUAI',fallback='120'))
dic_labels={0:"玩家",1:"采集物",2:"生命树",3:"开关",4:"怪物",5:"提示关闭",6:"使用原粹树脂",7:"使用浓缩树脂",
8:"关闭",9:"继续挑战",10:"退出秘境",11:"副本门框",12:"副本打完了",13:"捡东西",14:"副本楼梯",
15:"往下落",16:"矿石",17:"往上跳",18:"交互对话",19:"采集物-风",20:"采集物-冰",21:"确定",
22:"取消",23:"返回",24:"被控了",25:"在水面",26:"宠物",27:"掉落物"}
状态_寻路中=True
状态_YOLOV=False
状态_检测中 = False
状态_循环开关 = False
状态_在爬墙=False
状态_需重新传送=False
状态_全局暂停=False
状态_已经有寻路了=False
状态_传送中=False
状态_是否回放中=False
状态_是否暂停=False
状态_是否开始录制=False
状态_是否禁止录制=True
状态_开伞时间=-1
录制_当前脚本名=""
录制_脚本文本=""
录制_path=""
录制_当前任务索引=-1
计时_未寻路=0
计次_误差过大=0
计次_移动停滞=0
计次_定位失败=0
计次_识别次数=0
计数_没找到怪物=0
计数_卡主次数=0
计数_没找到任何目标=0
计次_传送重试次数=0
开关_是否展预测结果=True
图片_YOLOV=False
图片_找图=False
游戏_当前视野角度=0
游戏_当前导航角度=0
游戏_当前坐标=[0,0]
游戏_当前目标坐标=[0,0]
游戏_打怪前坐标=[0,0]
QT_信号=None
版本号="1.78"
python_var=sys.version_info[1]
# 添加临时路径
temp_path = os.path.dirname(__file__)+"\\"
sys.path.append(temp_path)
# 获取当前的PATH环境变量
original_path = os.environ['PATH']
os.environ['PATH'] = temp_path + os.pathsep + original_path
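state.py expects ./datas/setting.ini to contain a single [seting] section (note the project's spelling). A minimal sketch of such a file, built from the fallback values above; any key left out simply falls back to its default:
[seting]
GAME_TITLE = 原神
WEIGHTS = ./datas/yolov5s_320.onnx
IMGSIZE_WIDTH = 320
IMGSIZE_HEIGHT = 320
PROVIDERS = ["CUDAExecutionProvider", "CPUExecutionProvider"]
DUANGKOUHAO = 29943
TIMEOUT_DAGUAI = 120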
4. Formatted log output
vi logger_module.py
import logging
import os
from logging.handlers import TimedRotatingFileHandler
# 创建全局的 logger
logger = logging.getLogger("懒人原神AI")
logger.setLevel(logging.DEBUG)
# 创建一个handler,用于将日志输出到控制台
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
log_dir = './log'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# 创建一个handler,用于将日志输出到文件
file_handler = TimedRotatingFileHandler('./log/lanrenyuanshenai.log', when='midnight', interval=1, backupCount=7)
file_handler.setLevel(logging.DEBUG)
# 定义日志消息格式
class CustomFormatter(logging.Formatter):
FORMATS = {
logging.DEBUG: '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
logging.INFO: '%(asctime)s - %(name)s - %(levelname)s - %(message)s' ,
logging.WARNING: '%(asctime)s - %(name)s - %(levelname)s - %(message)s' ,
logging.ERROR: '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
logging.CRITICAL: '%(asctime)s - %(name)s - %(levelname)s - %(message)s' ,
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
#创建一个formatter格式类
formatter = CustomFormatter()
#设置消息格式
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
# 将handler添加到logger
logger.addHandler(console_handler)
logger.addHandler(file_handler)
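Importing this module is enough to get the shared, pre-configured logger in any other file; it writes to the console and to ./log/lanrenyuanshenai.log, rotating at midnight and keeping 7 backups. A minimal usage sketch:
from logger_module import logger

logger.info("寻路开始")                  # goes to the console and to ./log/lanrenyuanshenai.log
logger.error("截图失败", exc_info=True)  # exc_info=True appends the active traceback, if there is one
Note that CustomFormatter currently maps every level to the same format string, so it behaves like a plain logging.Formatter; the FORMATS dict is just a hook for adding per-level formatting (colours, say) later.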
5. Dependency summary
Common dependencies
import ctypes
import os
import subprocess
import sys
import time
import traceback
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QApplication
Other module dependencies
# keyboard and mouse helpers
from lanrentools.moni import find_child_window_handle, screen_width
# version update check
from jianchaupdate import get_var_data, get_updatetxt
# global state
import state
# formatted log output
from logger_module import logger
Part 2: Starting the YOLO event loop (auto.py)
vi main.py
# 运行需要管理员权限的代码
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), 128)
os.environ['PNG_IGNORE_WARNINGS'] = '1'
# 在事件循环中创建一个任务对象并加入事件循环
aty = main() # 启动yolov服务
The main() call in the entry script comes from auto.py. The file is fairly large, so we break it into pieces.
1. Adding concurrent asynchronous multithreading
vi auto.py
import asyncio
import threading
import state
def main():
global loop, atys # 全局变量 函数外也可调用
# 创建一个新的事件循环 (用于管理异步任务)
loop = asyncio.new_event_loop()
# 启动 WebSocket 服务器
# 创建一个新的线程来运行 WebSocket 服务器
# 创建一个新的线程 t,该线程的目标是运行 loop.run_until_complete 方法,并传入 server_main() 协程作为参数。这意味着线程 t 将运行事件循环并等待 server_main() 完成
t = threading.Thread(target=loop.run_until_complete, args=(server_main(),))
# 根据 state.python_var 的值来决定线程是否为守护线程
if state.python_var > 9:
t.daemon = True # 如果大于9,设置线程为守护线程
else:
t.setDaemon(True) # 如果小于等于9,同样设置线程为守护线程
# 启动线程
t.start()
#获取自动化游戏操作类
atys = AutoYuanShen(weights=state.WEIGHTS, providers=state.PROVIDERS,dic_labels=state.dic_labels,model_h=state.IMGSIZE_WIDTH, model_w=state.IMGSIZE_HEIGHT)
state.状态_YOLOV = True
return atys
The main function bootstraps the whole application: it creates a new event loop and runs server_main on it in a new thread. Note that both branches of the state.python_var check make the thread a daemon; the check only decides between the t.daemon attribute and the older setDaemon() call, which has been deprecated since Python 3.10. main then constructs an AutoYuanShen instance (the game-automation class), flips the 状态_YOLOV flag and returns the object.
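On Python 3.10+ the whole branch collapses to passing daemon=True when the thread is created; a minimal sketch of the same background-loop pattern:
import asyncio
import threading

loop = asyncio.new_event_loop()
# daemon=True so the WebSocket thread cannot keep the process alive after the main program exits.
t = threading.Thread(target=loop.run_until_complete, args=(server_main(),), daemon=True)
t.start()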
2. The WebSocket server function
server_main is an async function that starts the WebSocket server. It first tries to create a server listening on the configured port on localhost; if the port is already taken, it prompts the user for a new port number and writes it back to the config file.
Once the server is created, it launches the external combo-plugin EXE and logs the result; if the launch fails, it logs the error and terminates the program.
Finally, the function waits for the WebSocket server to close.
vi auto.py
import websockets
import asyncio
import sys
import subprocess
import random
import configparser
import ctypes
async def server_main():
"""
主异步函数,用于启动WebSocket服务器并处理客户端连接。
"""
try:
# 创建WebSocket服务器,监听本地主机的指定端口,并设置最大消息大小为10MB
server = await websockets.serve(handle_client, "localhost", int(state.DUANGKOUHAO), max_size=1024 * 1024 * 10)
except Exception as e:
# 如果端口被占用,提示用户输入新的端口号
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), 7) # 设置控制台模式以显示颜色
state.DUANGKOUHAO = input(f"端口 {state.DUANGKOUHAO} 被占用了! 请输入新的端口号(然后手动重开): ")
# 如果输入的端口号长度不足3位,则随机生成一个端口号
if len(state.DUANGKOUHAO) < 3:
state.DUANGKOUHAO = int(random.randint(1000, 19999))
try:
# 创建 ConfigParser 对象
config = configparser.ConfigParser()
# 添加节和键-值对
config['seting'] = {
'DUANGKOUHAO': str(state.DUANGKOUHAO)
}
# 写入配置到 INI 文件
with open("./datas/setting.ini", 'w') as configfile:
config.write(configfile)
except Exception as e:
# 如果写入配置文件失败,记录错误但继续执行
print(f"写入配置文件失败: {e}")
# 终止程序
sys.exit()
try:
# 启动外部EXE程序
subprocess.Popen('"' + state.LIANZHAOFUWU + '" "' + str(state.DUANGKOUHAO) + '"')
# 输出日志信息
logger.info("躺宝连招插件服务已经启动!")
except Exception as e:
# 如果启动程序失败,输出错误信息
logger.error("文件没找到,可能被杀毒软件干掉了: " + state.LIANZHAOFUWU)
input("回车结束!")
# 终止程序
sys.exit()
# 启动事件循环,等待服务器关闭
await asyncio.gather(server.wait_closed())
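One caveat in the port-conflict branch: it writes setting.ini from a freshly created ConfigParser that holds only DUANGKOUHAO, so as written it would drop every other key in the [seting] section. A safer variant (a sketch, not the project's code) reads the existing file first and updates just the one key:
import configparser

config = configparser.ConfigParser()
config.read("./datas/setting.ini")            # keep all existing keys
if not config.has_section("seting"):
    config.add_section("seting")
config.set("seting", "DUANGKOUHAO", str(state.DUANGKOUHAO))   # change only the port
with open("./datas/setting.ini", "w") as configfile:
    config.write(configfile)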
3. Handling client connections
handle_client is an async function that serves each client connection. It first sends the client a "连接成功" (connected) greeting and appends the new connection to the connected_clients list. It then loops forever, receiving messages from the client and updating state variables according to the message content.
If the message is "是否回放中 真" or "是否回放中 假", it sets state.状态_是否回放中 to True or False; the same pattern applies to the "是否暂停" and "是否开始录制" messages. If the message starts with "录制的脚本", it extracts the script body and stores it in state.录制_脚本文本.
If a ConnectionClosedError is raised while handling messages, the client is removed from connected_clients; any other exception is caught and logged as an error.
import threading
import cv2
import numpy as np
import state
import websockets
import asyncio
import sys
import subprocess
import random
import configparser
import ctypes
from logger_module import logger
# 存储所有已连接的客户端
atys = None
loop = None
connected_clients = []
new_msg = ""
async def handle_client(websocket, path):
"""
处理客户端连接的异步函数。
参数:
- websocket: WebSocket连接对象
- path: 连接路径
"""
global new_msg
try:
# 发送连接成功消息
await websocket.send("连接成功")
# 添加新连接的客户端到集合中
connected_clients.append(websocket)
while True:
# 接收客户端发送的消息
message = await websocket.recv()
new_msg = message
# 处理接收到的消息
if message == "是否回放中 真":
# 设置状态为正在回放
state.状态_是否回放中 = True
elif message == "是否回放中 假":
# 设置状态为未回放
state.状态_是否回放中 = False
elif message == "是否暂停 真":
# 设置状态为已暂停
state.状态_是否暂停 = True
elif message == "是否暂停 假":
# 设置状态为未暂停
state.状态_是否暂停 = False
elif message == "是否开始录制 假":
# 设置状态为未开始录制
state.状态_是否开始录制 = False
elif message == "是否开始录制 真":
# 设置状态为开始录制
if not state.状态_是否禁止录制:
# 发送提示信息
logger.info("请按F8结束录制")
state.QT_信号.mysig_tishi.emit("请按F8结束录制")
state.状态_是否开始录制 = True
elif message == "是否禁止录制 假":
# 设置状态为允许录制
state.状态_是否禁止录制 = False
elif message == "是否禁止录制 真":
# 设置状态为禁止录制
state.状态_是否禁止录制 = True
elif message.startswith("录制的脚本"):
# 获取录制的脚本内容
state.录制_脚本文本 = message[6:]
if state.录制_脚本文本 != "":
# 如果脚本内容不为空,则记录日志信息
logger.info("录制完毕!")
# 可选:打印消息
# print(message)
except websockets.exceptions.ConnectionClosedError:
# 如果连接关闭,则从连接列表中移除该客户端
connected_clients.remove(websocket)
logger.warning(f"客户端断开连接: {websocket.remote_address}")
except Exception as e:
# 处理其他类型的异常
logger.error(f"处理客户端消息时发生错误: {e}")
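Because the messages are plain text, the protocol is easy to exercise with a small test client. A minimal sketch (assuming the default port 29943 from setting.ini) that connects, reads the greeting and toggles the pause flag:
import asyncio
import websockets

async def demo_client():
    # 29943 is the DUANGKOUHAO default; use whatever port your setting.ini actually holds.
    async with websockets.connect("ws://localhost:29943") as ws:
        print(await ws.recv())         # the server greets with "连接成功"
        await ws.send("是否暂停 真")    # handle_client sets state.状态_是否暂停 = True
        await asyncio.sleep(1)
        await ws.send("是否暂停 假")    # and back to False

asyncio.run(demo_client())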
4. Sending to a specific client, and other small helpers
import time
import pyautogui
def 开伞():
__t = time.time()
if -state.状态_开伞时间 > 2 or state.状态_开伞时间 == -1:
state.状态_开伞时间 = __t
logger.info("检测到可能在下落 打风之翼")
pyautogui.keyDown('space')
time.sleep(0.2)
pyautogui.keyUp('space')
time.sleep(0.4)
pyautogui.keyDown('space')
time.sleep(0.2)
pyautogui.keyUp('space')
state.计次_误差过大 += 1
state.状态_开伞时间 = -1
def find_dict_index(dict_list, key, value):
max_sim = 0
index = None
for i, dictionary in enumerate(dict_list):
if dictionary.get(key) == value:
if dictionary.get('sim') > max_sim:
max_sim = dictionary.get('sim')
index = i
return index != None, index
def find_dict_num(dict_list, key, value):
num_ = 0
for i, dictionary in enumerate(dict_list):
if dictionary.get(key) == value:
num_ += 1
return num_
async def send_to_client(client_id, message):
if len(connected_clients) > 0:
# 查找指定的客户端
await connected_clients[client_id].send(message)
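send_to_client is a coroutine, but most of the automation code below runs in ordinary threads, so the project schedules it onto the event loop created in main() via asyncio.run_coroutine_threadsafe. A minimal sketch of that pattern, which you will see repeated throughout AutoYuanShen:
import asyncio

# Called from a worker thread; `loop` is the event loop started in main().
future = asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
# It returns a concurrent.futures.Future immediately; future.result(timeout=5) would block
# until the send has actually completed on the loop thread.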
Part 3: The game-automation class (AutoYuanShen)
vi auto.py
imgs_duihuahuopengren = [
cv2.imdecode(np.fromfile(file="./datas/对话.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED),
cv2.imdecode(np.fromfile(file="./datas/烹饪.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED),
cv2.imdecode(np.fromfile(file="./datas/钓鱼.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED),
cv2.imdecode(np.fromfile(file="./datas/齿轮.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
]
imgs_pianpianhua = [cv2.imdecode(np.fromfile(file="./datas/薄荷.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED),
cv2.imdecode(np.fromfile(file="./datas/甜甜花.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
]
import pyautogui
from lanrenonnx import *
from lanrentools.moni import *
from screenshot import screenshot
import copy
import traceback
from anglerecognition import template_matching, get_view2
import math
class AutoYuanShen():
def __init__(self, weights, providers=["CUDAExecutionProvider",'CPUExecutionProvider'],dic_labels={},model_h=320, model_w=320):
pyautogui.PAUSE = 0.01
pyautogui.FAILSAFE = False
self.yolov = LanRenOnnxYolov(weights=weights, providers=providers,dic_labels=dic_labels,model_h=model_h, model_w=model_w)
self.interaction_label = ["怪物", "采集物", "矿石","掉落物"]
self.centre_point = [(1920 // 2), (1080 // 2) + 100]
self.key_w_down = True
self.小地图区域 = (87 - 10, 45 - 10, 246 + 10, 204 + 10)
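# run_fight: the main detection/combat loop while following a route. Each pass grabs a
# 1920x1080 screenshot, runs YOLO inference, sorts detections by distance to the screen centre,
# then reacts by label: 捡东西/交互对话 press F (after ruling out dialogue/cooking prompts),
# 确定/取消/返回/关闭 handle pop-ups, 在水面/被控了 recover, 怪物/矿石/采集物/掉落物 turn towards the
# target, walk in and trigger the combo script. The wakuang/caiji/daguai flags decide whether
# ore, gatherables and monsters are pursued.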
def run_fight(self,wakuang=False,caiji=False,daguai=False):
global loop
# 获取窗口句柄
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
while True:
if state.状态_循环开关 == False:
logger.info("强制退出!")
pyautogui.keyUp('w')
state.状态_是否回放中 = False
state.计数_没找到任何目标 = 0
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_检测中=False
return
if self.get_window_handle_at_mouse_position() == hwnd:
break
state.计数_没找到任何目标 = 0
state.计数_没找到怪物 = 0
state.状态_在爬墙 = False
state.状态_检测中 = True
state.状态_开伞时间 = -1
已经_F了=False
历史_上次玩家高度 = 0.1
玩家高度 = 0.1
计数_没有玩家 = 0
isIn_wxl = False
set_window_activate(hwnd)
jiaoben = (os.path.abspath(os.path.join(state.PATH_JIAOBEN, state.LIANZHAO)))
# 临时发送消息给指定客户端
message = f"解析脚本#@@#{jiaoben}"
asyncio.run_coroutine_threadsafe(send_to_client(-1, message), loop)
time.sleep(2)
while True:
try:
if state.状态_循环开关 == False:
state.状态_是否回放中 = False
state.计数_没找到任何目标 = 0
state.状态_检测中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
return
time.sleep(0.005)
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom = 0, 0, 1920, 1080 # 替换成你实际的区域坐标
# F交互框位置
left_dh, top_dh, right_dh, bottom_dh = [1161, 508, 1319, 572] # 替换成你实际的区域坐标
p_left = rect[0] + w_p
p_top = rect[1] + h_p
计数_没有玩家 += 1
if state.状态_寻路中 == False:
state.计数_没找到任何目标 += 1
state.计数_没找到怪物 += 1
state.状态_在爬墙 = False
if state.状态_已经有寻路了 == False or state.状态_检测中==False:
state.状态_是否回放中 = False
state.计数_没找到任何目标 = 0
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_检测中 = False
return
if state.状态_全局暂停:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
time.sleep(2)
continue
if self.get_window_handle_at_mouse_position() != hwnd:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
time.sleep(0.1)
# 激活hwnd
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
time.sleep(0.2)
if state.状态_传送中:
time.sleep(1)
continue
# 设定保存截图的文件夹路径和文件名前缀
# filename = f"./原神截图/{int(time.time())}{random.randint(1000,9999)}.jpg"
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
if type(ret_scr) == bool:
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
logger.info("截图失败了!可能是内存溢出了")
time.sleep(2)
continue
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
if state.开关_是否展预测结果:
state.QT_信号.mysig_show_yolov.emit()
if state.状态_全局暂停:
pyautogui.keyUp('w')
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
time.sleep(2)
continue
if datas == []:
if state.状态_是否回放中 == False:
if state.状态_在爬墙 == False and state.状态_寻路中 == True:
计数_没有玩家 = 0
ttt = threading.Thread(target=开伞)
if state.python_var > 9:
ttt.daemon = True
else:
ttt.setDaemon(True)
ttt.start()
else:
# 将离屏幕中心最近的点排在前面
datas = sorted(datas, key=self.distance_to_target)
isIn, idex = find_dict_index(datas, 'label', '往上跳')
if isIn:
state.状态_在爬墙 = True
else:
state.状态_在爬墙 = False
isIn_wxl, _ = find_dict_index(datas, 'label', '往下落')
isIn, idex = find_dict_index(datas, 'label', '被控了')
if isIn:
logger.info("被控了!")
pyautogui.press('space')
pyautogui.press('space')
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
continue
isIn_jdx, idex = find_dict_index(datas, 'label', '捡东西')
if isIn_jdx: # 路过的顺手捡了
# 二次排除对话烹饪等
if random.randint(1, 2) == 1:
pyautogui.scroll(300)
else:
pyautogui.scroll(-300)
pyautogui.keyUp('w')
if not self.find_duihuahuopengren(hwnd, left_dh, top_dh, right_dh, bottom_dh, p_left, p_top):
self.key_w_down = False
已经_F了 = True
state.计次_识别次数 = 0
state.计数_卡主次数 = 0
logger.info("顺手捡采集物!", datas[idex])
state.状态_有目标 = True
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
time.sleep(0.1)
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
time.sleep(0.1)
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
time.sleep(0.1)
state.计次_识别次数 = 0
state.计数_卡主次数 = 0
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
else:
pyautogui.keyDown('w')
logger.info(f"是对话或烹饪 {datas[idex]}")
isIn_jdx = False
isIn_jhdh, idex = find_dict_index(datas, 'label', '交互对话')
if isIn_jhdh: # 路过的顺手捡了
# 二次排除对话烹饪 并且识别骗骗花
if random.randint(1, 2) == 1:
pyautogui.scroll(300)
else:
pyautogui.scroll(-300)
pyautogui.keyUp('w')
if not self.find_duihuahuopengren(hwnd, left_dh, top_dh, right_dh, bottom_dh, p_left,
p_top) or self.find_pianpianhua(hwnd, left_dh, top_dh,
right_dh, bottom_dh, p_left,
p_top):
self.key_w_down = False
已经_F了 =True
state.计次_识别次数 = 0
state.计数_卡主次数 = 0
logger.info(f"顺手捡采集物!{datas[idex]}")
state.状态_有目标 = True
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
time.sleep(0.1)
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
time.sleep(0.5)
state.计次_识别次数 = 0
state.计数_卡主次数 = 0
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
else:
pyautogui.keyDown('w')
logger.info(f"是对话或烹饪{datas[idex]}")
isIn_jhdh = False
isIn, idex = find_dict_index(datas, 'label', '在水面')
if isIn:
logger.info("在水面!")
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_寻路中 = True
self.key_w_down = True
if random.randint(1, 4) == 2:
pyautogui.keyDown('shiftleft')
time.sleep(0.6)
pyautogui.keyUp('shiftleft')
isIn, idex = find_dict_index(datas, 'label', '确定')
if isIn:
if state.状态_寻路中:
state.状态_寻路中 = False
state.状态_有目标 = True
state.状态_是否回放中 = False
if self.key_w_down:
self.key_w_down = False
pyautogui.keyUp('w')
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否暂停#@@#真"), loop)
time.sleep(0.2)
mouse_move(p_left + datas[idex]["x"], p_top + datas[idex]["y"])
logger.info(f"发现确定按钮,可能是吃复活药{datas[idex]}")
time.sleep(0.1)
mouse_left_down()
time.sleep(0.1)
mouse_left_up()
time.sleep(0.5)
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
isIn, idex = find_dict_index(datas, 'label', '确定')
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否暂停#@@#假"), loop)
if not isIn:
logger.info("停止脚本")
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
isIn, idex = find_dict_index(datas, 'label', '取消')
if isIn:
if state.状态_寻路中:
state.状态_寻路中 = False
state.状态_是否回放中 = False
if self.key_w_down:
self.key_w_down = False
pyautogui.keyUp('w')
# 判断是否死亡
ret_sw = self.find_zhuyidiren(hwnd, left, top, right, bottom, p_left, p_top)
state.状态_有目标 = True
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否暂停#@@#真"), loop)
time.sleep(0.2)
mouse_move(p_left + datas[idex]["x"], p_top + datas[idex]["y"])
logger.info(f"发现取消按钮,可能是吃复活药{datas[idex]}")
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.3)
pyautogui.keyDown('shift')
time.sleep(0.2)
pyautogui.keyUp('shift')
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否暂停#@@#假"), loop)
if ret_sw:
state.计次_移动停滞 = 1200
state.计次_定位失败 = 1200
state.状态_是否回放中 = False
state.计数_没找到任何目标 = 0
logger.info("全军覆没了!跳过该任务!")
pyautogui.keyDown('esc')
time.sleep(0.1)
pyautogui.keyUp('esc')
state.状态_检测中 = False
return
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
isIn, idex = find_dict_index(datas, 'label', '返回')
if isIn:
logger.info("在派蒙界面,点返回")
pyautogui.keyUp('w')
self.key_w_down = False
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
time.sleep(2)
pyautogui.keyDown('shift')
time.sleep(0.2)
pyautogui.keyUp('shift')
continue
isIn, idex = find_dict_index(datas, 'label', '玩家')
if isIn:
玩家高度 = datas[idex]["points"][1][1] - datas[idex]["points"][0][1]
计数_没有玩家 = 0
isIn, idex = find_dict_index(datas, 'label', '关闭')
if isIn:
if state.状态_寻路中:
state.状态_寻路中 = False
state.状态_有目标 = True
logger.info(f"发现关闭按钮 点错了界面{datas[idex]}")
pyautogui.keyDown('esc')
time.sleep(0.2)
pyautogui.keyUp('esc')
time.sleep(1)
mouse_moveR(6 * 180, 0)
time.sleep(2)
pyautogui.keyDown("w")
time.sleep(0.1)
pyautogui.keyUp("w")
self.key_w_down = False
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
for data in datas:
if isIn_jdx == True:
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
break
if isIn_jhdh == True:
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
break
# 发现怪物则进行视角移动 以及 接近目标
if data["label"] in self.interaction_label:
if state.状态_寻路中 == False:
# 将它的中心点和游戏正中间进行靠近
if data["label"] == "怪物" and data["points"][1][0] - \
data["points"][0][0] >= 70:
if not daguai:
continue
state.计数_没找到任何目标 = 0
state.计数_没找到怪物 = 0
# 如果怪物就再旁边则直接连招
if self.centre_point[1] - data["points"][1][1] <= 20:
if self.key_w_down:
self.key_w_down = False
pyautogui.keyUp('w')
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
if not state.状态_是否回放中:
mouse_moveR(0, 800)
logger.info("在连招中....")
pyautogui.keyDown('x')
time.sleep(0.2)
pyautogui.keyUp('x')
state.状态_是否回放中 = True
asyncio.run_coroutine_threadsafe(send_to_client(-1, "脚本执行#@@#2"), loop)
else:
angle=self.get_angle(self.centre_point, (data["x"], data["y"]))
angle_diff = abs(0 - angle)
if state.状态_在爬墙==True:
#如果角度大于45度则建议按X绕过柱子
if angle_diff>45:
pyautogui.keyDown('x')
time.sleep(0.2)
pyautogui.keyUp('x')
self.setAlign(0, angle)
if random.randint(1,2)==2:
pyautogui.keyDown('a')
time.sleep(0.2)
pyautogui.keyUp('a')
else:
pyautogui.keyDown('d')
time.sleep(0.2)
pyautogui.keyUp('d')
else:
pyautogui.keyDown('space')
time.sleep(0.2)
pyautogui.keyUp('space')
else:
self.setAlign(0, angle)
pyautogui.keyDown('w')
if random.randint(0, 50) == 5:
pyautogui.keyDown('space')
time.sleep(0.2)
pyautogui.keyUp('space')
self.key_w_down = True
break # 只判断第一只怪
elif data["label"] == "矿石":
if not wakuang:
continue
state.计数_没找到任何目标 = 0
state.计数_没找到怪物 = 0
self.setAlign(0, self.get_angle(self.centre_point, (data["x"], data["y"])))
# 如果矿石就再旁边则直接连招
if self.centre_point[1] - data["points"][1][1] <= 40:
if self.key_w_down:
self.key_w_down = False
time.sleep(0.2)
pyautogui.keyUp('w')
logger.info("在挖矿中....")
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
if not state.状态_是否回放中:
logger.info("在挖矿中....")
pyautogui.press("x")
state.状态_是否回放中 = True
asyncio.run_coroutine_threadsafe(send_to_client(-1, "脚本执行#@@#2"), loop)
else:
pyautogui.keyDown('w')
if random.randint(0, 50) == 5:
pyautogui.keyDown('space')
time.sleep(0.2)
pyautogui.keyUp('space')
self.key_w_down = True
break # 只判断第1个矿
else: # '发现采集物'或 "掉落物"
state.计数_没找到任何目标 = 0
angle_now = 0
angle_target = self.get_angle(self.centre_point, (data["x"], data["y"]))
r_caiji=self.setAlign(angle_now, angle_target)
pyautogui.keyDown('w')
if random.randint(0, 30) == 5:
pyautogui.keyDown('space')
pyautogui.keyUp('space')
self.key_w_down = True
if r_caiji==False:
pyautogui.keyDown('shift')
time.sleep(0.2)
pyautogui.keyUp('shift')
break # 只判断第一个采集物
else:
if data["sim"]>=0.6:
if state.状态_在爬墙==True:
continue
if data["label"] == "矿石":
if not wakuang:
continue
logger.info("发现有矿石.顺手挖一下吧!")
已经_F了 = False
elif data["label"] == "怪物" and data["points"][1][
0] - data["points"][0][0] >= 90:
if not daguai:
continue
已经_F了 = False
logger.info("发现有怪拦路.顺手消灭吧!")
elif data["label"] == "采集物":
if not caiji:
continue
已经_F了 = False
logger.info("发现有采集物.顺手捡一下吧!")
elif data["label"] =="掉落物":
已经_F了 = False
logger.info("发现有掉落物.顺手捡一下吧!")
else:
continue
self.setAlign(0, self.get_angle(self.centre_point, (data["x"], data["y"])))
state.计时_未寻路 = int(time.time())
state.状态_寻路中 = False
state.游戏_打怪前坐标 = copy.copy(state.游戏_当前目标坐标)
state.计数_没找到任何目标 = 0
state.计数_没找到怪物 = 0
pyautogui.keyUp('w')
self.key_w_down = False
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
mouse_left_down()
time.sleep(0.1)
mouse_left_up()
break
# print("state.计时_未寻路", state.计时_未寻路, "state.计数_没找到任何目标", state.计数_没找到任何目标,
# "state.计数_没找到怪物", state.计数_没找到怪物)
# 如果玩家突然不见了,说明正在快速下落的概率很大,则开伞
if state.状态_寻路中 == True and state.状态_在爬墙 == False and isIn_wxl == False:
if 计数_没有玩家 >= 3 or (历史_上次玩家高度 / 玩家高度 >= 1.5):
计数_没有玩家 = 0
ttt = threading.Thread(target=开伞)
if state.python_var > 9:
ttt.daemon = True
else:
ttt.setDaemon(True)
ttt.start()
if 计数_没有玩家 == 0:
历史_上次玩家高度 = copy.copy(玩家高度)
if state.状态_寻路中 == False:
# 如果按下了W键了,并且当前没有任何目标等于4次了,则松开w键
if state.计数_没找到任何目标 == 6:
self.key_w_down = False
state.计次_识别次数 = 0
state.计数_卡主次数 = 0
pyautogui.keyUp('w')
if state.计次_定位失败 > 0:
state.计数_没找到怪物 = 0
if state.ON_LIANZHAOBUJIANCE==0 and wakuang==False and state.计数_没找到怪物 >= 25 and state.计次_定位失败 == 0 and state.状态_是否回放中 == True:
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
# 如果连续8次没找到目标,则进行旋转
if 20 > state.计数_没找到任何目标 >= 15 or (
state.计数_没找到任何目标 >= 1 and state.计数_没找到怪物 >= 10 and state.状态_是否回放中 == True):
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
历史_上次玩家高度 = 0
if state.计数_没找到任何目标 == 15:
if state.计次_定位失败 == 0:
if state.状态_是否回放中 == False:
logger.info("重置视角")
pyautogui.keyUp('w')
self.key_w_down = False
pyautogui.keyDown('x')
time.sleep(0.1)
pyautogui.keyUp('x')
# 重置镜头
mouse_middle_down()
time.sleep(0.2)
mouse_middle_up()
time.sleep(0.3)
mouse_moveR(0, 600)
state.计次_识别次数 = 0
state.计数_卡主次数 = 0
else:
state.计数_没找到任何目标 = 0
else:
state.计数_没找到任何目标 = 0
else:
mouse_moveR(700, 0)
time.sleep(0.1)
elif state.计数_没找到任何目标 == 13 and state.状态_是否回放中 == False:
mouse_moveR(0, 800)
elif state.计数_没找到任何目标 == 10 and state.状态_是否回放中 == False:
if not 已经_F了:
pyautogui.keyDown('shift')
time.sleep(0.2)
pyautogui.keyUp('shift')
# 如果连续18次没找到目标,则继续寻路X
elif state.计数_没找到任何目标 >= 20:
if not state.状态_是否回放中:
logger.info("附近没有怪和采集物了.继续寻路吧!")
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_寻路中 = True
state.计次_识别次数 = 0
state.计数_卡主次数 = 0
state.计数_没找到任何目标 = 0
mouse_left_up()
pyautogui.keyUp('w')
continue
time_now=time.time()
if time_now-state.计时_未寻路 >= state.TIMEOUT_DAGUAI:
kkk = self.处理异常情况(hwnd, left, top, right, bottom, p_left, p_top)
if kkk == 0:
logger.info("在意外界面 超时!")
time.sleep(3)
state.状态_检测中 = False
state.状态_已经有寻路了 = False
state.状态_需重新传送 = False
state.计次_移动停滞 = 1200
state.计次_定位失败 = 1200
state.状态_是否回放中 = False
state.计数_没找到任何目标 = 0
return False
elif kkk == -1:
logger.info("全军覆没了!")
time.sleep(3)
state.计次_移动停滞 = 1200
state.计次_定位失败 = 1200
state.状态_是否回放中 = False
state.计数_没找到任何目标 = 0
state.状态_检测中 = False
state.状态_已经有寻路了 = False
state.状态_需重新传送 = True
return False
logger.info("附近可能还有怪,但是打不到.继续寻路吧!")
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_寻路中 = True
state.计数_卡主次数 = 0
mouse_left_up()
pyautogui.keyUp('w')
state.计数_没找到任何目标 = 0
state.游戏_打怪前坐标 = [0, 0]
time.sleep(5)
continue
elif time_now-state.计时_未寻路 == state.TIMEOUT_DAGUAI-15 and state.状态_是否回放中==False and state.计数_没找到怪物>=1:
mouse_left_up()
logger.info("可能是有矿石 连招中....")
if random.randint(1,3)==2:
pyautogui.press("x")
state.状态_是否回放中 = True
asyncio.run_coroutine_threadsafe(send_to_client(-1, "脚本执行#@@#2"), loop)
elif state.TIMEOUT_DAGUAI//2<= time_now-state.计时_未寻路 >= state.TIMEOUT_DAGUAI//2-10 and state.状态_是否回放中==False and state.计数_没找到怪物==0:
mouse_left_up()
logger.info("可能是飞行物 连招中....")
if random.randint(1,3)==2:
pyautogui.press("x")
state.状态_是否回放中 = True
asyncio.run_coroutine_threadsafe(send_to_client(-1, "脚本执行#@@#2"), loop)
except:
time.sleep(0.5)
logger.error(traceback.format_exc())
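# run_fuben: automated domain farming. It walks up to the domain, starts a single-player
# challenge, then cycles through the four stages listed below for up to `cishu` rounds:
# fight until 副本打完了 is detected, line up with 生命树/副本楼梯, walk over, spend resin
# (原粹/浓缩树脂) and click 继续挑战. A "not enough resin" popup makes it back out with ESC.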
def run_fuben(self, cishu=99, isFanxiang=False):
global loop
state.状态_已经有寻路了 = True
# 获取窗口句柄
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
while True:
if state.状态_循环开关 == False:
logger.info("强制退出!")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_需重新传送 = False
return False
if self.get_window_handle_at_mouse_position() == hwnd:
break
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom = 0, 0, 1920, 1080 # 替换成你实际的区域坐标
p_left = rect[0] + w_p
p_top = rect[1] + h_p
paimeng_img = cv2.imdecode(np.fromfile(file="./datas/派蒙.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
kkk = self.处理异常情况(hwnd, left, top, right, bottom, p_left, p_top)
if kkk == 0:
logger.info("在意外界面 超时!")
time.sleep(3)
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
return False
elif kkk == -1:
logger.info("全军覆没了!")
time.sleep(3)
state.状态_需重新传送 = True
state.状态_已经有寻路了 = False
return False
计数_没有玩家 = 0
jiaoben = (os.path.abspath(os.path.join(state.PATH_JIAOBEN, state.LIANZHAO)))
# 临时发送消息给指定客户端
message = f"解析脚本#@@#{jiaoben}"
asyncio.run_coroutine_threadsafe(send_to_client(-1, message), loop)
try:
# 往前走或者往后走,直到找到 F
if not self.find_f_keep_going(hwnd, left, top, right, bottom, p_left, p_top,
isFanxiang): state.状态_需重新传送 = True;return False
# 点击 单人挑战
if not self.click_danrentiaozhan(hwnd, left, top, right, bottom, p_left,
p_top): state.状态_需重新传送 = True;return False
# 判断弹窗是否有树脂不够提醒
if self.find_shuzhibugou(hwnd, left, top, right, bottom, p_left, p_top):
pyautogui.keyDown('esc')
time.sleep(0.1)
pyautogui.keyUp('esc')
time.sleep(2)
pyautogui.keyDown('esc')
time.sleep(0.1)
pyautogui.keyUp('esc')
state.状态_已经有寻路了 = False
logger.info("没体力了 成功退出副本!")
time.sleep(10)
state.状态_需重新传送 = False
return True
# 如果没提醒则选择阵容
if not self.click_kaishitiaozhan(hwnd, left, top, right, bottom, p_left, p_top):
pyautogui.keyDown('esc')
time.sleep(0.1)
pyautogui.keyUp('esc')
state.状态_需重新传送 = True
state.状态_已经有寻路了 = False
return False
# 开始yolov自动进入副本
except:
time.sleep(0.5)
logger.error(traceback.format_exc())
state.状态_需重新传送 = True
state.状态_已经有寻路了 = False
return False
# 0 镜头对准北方向 走过去开副本
# 1 开始打怪阶段
# 2 打完了 开始找生命树 并且把自己移动到0° 对准生命树的位置
# 3 走路到 领取奖励 直到点击 继续挑战为止 完成一轮
阶段 = 0
计次_阶段0 = 0
计时_阶段3_开始时间 = 0
state.计数_没找到怪物=0
isinF = False
for i in range(cishu):
logger.info(f"刷副本中 {i + 1}/{cishu}")
while True:
try:
time.sleep(0.01)
if state.状态_循环开关 == False:
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
return False
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom = 0, 0, 1920, 1080 # 替换成你实际的区域坐标
p_left = rect[0] + w_p
p_top = rect[1] + h_p
if state.状态_全局暂停:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
time.sleep(2)
continue
if self.get_window_handle_at_mouse_position() != hwnd:
# 激活hwnd
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
# 设定保存截图的文件夹路径和文件名前缀
# filename = f"./datas/lanrenyolov{random.randint(1, 20)}.jpg"
# 游戏全图
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
if type(ret_scr) == bool:
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
logger.info("截图失败了!可能是内存溢出了")
time.sleep(2)
continue
# 这里是小地图的截图
left_small, top_small, right_small, bottom_small = self.小地图区域 # 替换成你实际的区域坐标
height_small = bottom_small - top_small
width_small = right_small - left_small
small_img = screenshot(hwnd, left_small, top_small, right_small, bottom_small,
filename=None) # RGBA
if type(small_img) == bool:
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
time.sleep(2)
continue
state.游戏_当前视野角度 = get_view2(small_img[int(height_small / 2) - 70:int(height_small / 2) + 70,
int(width_small / 2) - 70:int(width_small / 2) + 70], 70)
# -----------------------------YOLOV检测--------------------------------------------------
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
if state.开关_是否展预测结果:
state.QT_信号.mysig_show_yolov.emit()
if state.状态_全局暂停:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
time.sleep(2)
continue
if datas == []:
pass
else:
# 将离屏幕中心最近的点排在前面
datas = sorted(datas, key=self.distance_to_target)
if 阶段 == 0: # 0 镜头对准北方向 走过去开副本
计次_阶段0 += 1
if 计次_阶段0 >= 2000:
pyautogui.keyDown('esc')
time.sleep(0.1)
pyautogui.keyUp('esc')
time.sleep(1)
state.状态_已经有寻路了 = False
logger.info("没体力了 成功退出副本!")
time.sleep(10)
state.状态_需重新传送 = False
return True
isIn, idex = find_dict_index(datas, 'label', '提示关闭')
if isIn: # 路过的顺手捡了
logger.info("发现 提示关闭")
mouse_move(p_left + datas[idex]["x"], p_top + datas[idex]["y"])
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(1)
continue
if self.click_dimaiyichang(hwnd, left, top, right, bottom, p_left, p_top):
time.sleep(1)
continue
if not self.setAlign(state.游戏_当前视野角度, 90, fault=5):
logger.info("调整整好")
# 角度对了 开始一路往前走 直到找到F按钮
pyautogui.keyDown("w")
if not self.find_f_keep_going(hwnd, left, top, right, bottom, p_left, p_top,
False): continue
pyautogui.keyUp("w")
阶段 = 1
isIn, idex = find_dict_index(datas, 'label', '确定')
isIn2, idex = find_dict_index(datas, 'label', '取消')
# 判断弹窗是否有树脂不够提醒 如果发现 则点击退出返回 阶段4走起
if isIn or isIn2:
pyautogui.keyDown('esc')
time.sleep(0.1)
pyautogui.keyUp('esc')
time.sleep(1)
state.状态_已经有寻路了 = False
logger.info("没体力了 成功退出副本!")
time.sleep(10)
state.状态_需重新传送 = True
return True
elif 阶段 == 1: # 1 开始打怪阶段
isinF = False
计次_阶段0 = 0
# 判断是否打完了副本
isIn, idex = find_dict_index(datas, 'label', '副本打完了')
if isIn or self.find_zidongtuichu(hwnd, 745,905,1206,1002, p_left, p_top):
if idex!=None:
if datas[idex]["sim"]>0.75:
pyautogui.keyUp('w')
mouse_left_up()
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
logger.info("打完了")
time.sleep(2)
# 重置镜头
mouse_middle_down()
time.sleep(0.1)
mouse_middle_up()
阶段 = 2
continue
else:
pyautogui.keyUp('w')
mouse_left_up()
state.状态_是否回放中 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
logger.info("打完了")
time.sleep(2)
# 重置镜头
mouse_middle_down()
time.sleep(0.1)
mouse_middle_up()
阶段 = 2
continue
isIn, idex = find_dict_index(datas, 'label', '往上跳')
if isIn:
logger.info("尝试跳起来")
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
isIn, idex = find_dict_index(datas, 'label', '被控了')
if isIn:
logger.info("被控了!")
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
time.sleep(0.1)
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
time.sleep(0.1)
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
time.sleep(0.1)
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
isIn, idex = find_dict_index(datas, 'label', '确定')
if isIn:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否暂停#@@#真"), loop)
time.sleep(0.2)
mouse_move(p_left + datas[idex]["x"], p_top + datas[idex]["y"])
logger.info(f"发现确定按钮,可能是吃复活药{datas[idex]}")
time.sleep(0.1)
mouse_left_down()
time.sleep(0.1)
mouse_left_up()
time.sleep(0.5)
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
isIn, idex = find_dict_index(datas, 'label', '确定')
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否暂停#@@#假"), loop)
if not isIn:
logger.info("停止脚本")
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
isIn, idex = find_dict_index(datas, 'label', '取消')
if isIn:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否暂停#@@#真"), loop)
time.sleep(0.2)
mouse_move(p_left + datas[idex]["x"], p_top + datas[idex]["y"])
logger.info(f"发现取消按钮,可能是吃复活药{datas[idex]}")
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.3)
pyautogui.keyDown('shift')
time.sleep(0.2)
pyautogui.keyUp('shift')
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否暂停#@@#假"), loop)
ret_scr = screenshot(hwnd, left, top, right, bottom, filename=None)
datas, state.图片_YOLOV = self.yolov.detect(ret_scr, plot_box=state.开关_是否展预测结果)
isIn, idex = find_dict_index(datas, 'label', '怪物')
if not isIn:
state.计数_没找到怪物+=1
if random.randint(1, 50) == 2:
mouse_middle_down()
time.sleep(0.2)
mouse_middle_up()
if state.计数_没找到怪物>=20:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_是否回放中 = False
pyautogui.keyUp('w')
mouse_moveR(int(6 * 120), 0)
time.sleep(0.1)
if template_matching(ret_scr, paimeng_img, mask=None) != []:
logger.info("全军覆没了!跳重试该任务!")
state.状态_需重新传送 = True
state.状态_已经有寻路了 = False
return False
else:
state.计数_没找到怪物=0
for data in datas:
# 将它的中心点和游戏正中间进行靠近
if data["label"] == "怪物":
self.setAlign(0, self.get_angle(self.centre_point, (data["x"], data["y"])))
# 如果怪物就再旁边则直接连招
if self.centre_point[1] - data["points"][1][1] <= 80 and data["points"][1][0] - \
data["points"][0][0] >= 60:
if self.key_w_down:
self.key_w_down = False
pyautogui.keyUp('w')
if not state.状态_是否回放中:
logger.info("在连招中....")
pyautogui.press("x")
state.状态_是否回放中 = True
asyncio.run_coroutine_threadsafe(send_to_client(-1, "脚本执行#@@#2"), loop)
else:
# 不在则继续跟着怪物方向走
pyautogui.keyDown('w')
self.key_w_down = True
break
elif 阶段 == 2: # 2 打完了 开始找生命树 并且把自己移动到0° 对准生命树的位置
调整视角 = self.setAlign(state.游戏_当前视野角度, 90, fault=6)
发现了目标了 = False
isIn, idex = find_dict_index(datas, 'label', '生命树')
for data in datas:
if data["label"] == "生命树":
发现了目标了 = True
x_p = data["x"] - self.centre_point[0]
if x_p >= 16:
pyautogui.keyUp('a')
logger.info("树偏右边 按下D键")
pyautogui.keyDown('d')
elif x_p <= -16:
pyautogui.keyUp('d')
logger.info("树偏左边 按下A键")
pyautogui.keyDown('a')
else:
if 调整视角 == False:
pyautogui.keyUp('d')
pyautogui.keyUp('a')
logger.info("树正对,走起!")
pyautogui.keyDown('w')
阶段 = 3
计时_阶段3_开始时间 = time.time()
break
logger.info("树正对 但是角度还不对")
elif data["label"] == "副本楼梯":
发现了目标了 = True
x_p = data["x"] - self.centre_point[0]
if x_p >= 16:
pyautogui.keyUp('a')
logger.info("副本楼梯偏右边 按下D键")
pyautogui.keyDown('d')
elif x_p <= -16:
pyautogui.keyUp('d')
logger.info("副本楼梯偏左边 按下A键")
pyautogui.keyDown('a')
else:
if 调整视角 == False:
pyautogui.keyUp('d')
pyautogui.keyUp('a')
logger.info("副本楼梯正对,走起!")
pyautogui.keyDown('w')
阶段 = 3
计时_阶段3_开始时间=time.time()
break
logger.info("副本楼梯正对 但是角度还不对")
elif data["label"] == "副本门框" and isIn == False:
发现了目标了 = True
x_p = data["x"] - self.centre_point[0]
if x_p >= 16:
pyautogui.keyUp('a')
logger.info("副本门框偏右边 按下D键")
pyautogui.keyDown('d')
elif x_p <= -16:
pyautogui.keyUp('d')
logger.info("副本门框偏左边 按下A键")
pyautogui.keyDown('a')
if 阶段 == 3:
continue
if 发现了目标了 == False:
rrrr = random.randint(1, 20)
if rrrr == 1:
pyautogui.keyDown('s')
pyautogui.keyDown('a')
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
time.sleep(0.2)
pyautogui.keyUp('a')
pyautogui.keyUp('s')
elif rrrr == 2:
pyautogui.keyDown('s')
pyautogui.keyDown('a')
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
time.sleep(0.2)
pyautogui.keyUp('a')
pyautogui.keyUp('s')
elif rrrr == 3:
pyautogui.keyDown('w')
pyautogui.keyDown('a')
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
time.sleep(0.2)
pyautogui.keyUp('a')
pyautogui.keyUp('w')
elif rrrr == 4:
pyautogui.keyDown('w')
pyautogui.keyDown('d')
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
time.sleep(0.2)
pyautogui.keyUp('d')
pyautogui.keyUp('w')
else:
pyautogui.keyDown('s')
time.sleep(0.1)
pyautogui.keyUp('s')
elif 阶段 == 3: # 3 走路到 领取奖励 直到点击 继续挑战为止 完成一轮
pyautogui.keyDown('w')
isIn, idex = find_dict_index(datas, 'label', '交互对话')
if isIn: # 路过的顺手捡了
logger.info("发现 F 交互")
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
isinF = True
continue
isIn, idex = find_dict_index(datas, 'label', '捡东西')
if isIn: # 路过的顺手捡了
logger.info("发现 F 交互")
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
isinF = True
continue
isIn, idex = find_dict_index(datas, 'label', '生命树')
if isIn and isinF==False : # 路过的顺手捡了
self.setAlign(0, self.get_angle(self.centre_point, (datas[idex]["x"], datas[idex]["y"])))
isIn, idex = find_dict_index(datas, 'label', '使用浓缩树脂')
if isIn: # 路过的顺手捡了
if datas[idex]['sim'] > 0.7:
logger.info("发现 使用浓缩树脂")
mouse_move(p_left + datas[idex]["x"], p_top + datas[idex]["y"])
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.5)
continue
isIn, idex = find_dict_index(datas, 'label', '使用原粹树脂')
if isIn: # 路过的顺手捡了
if datas[idex]['sim'] > 0.7:
logger.info("发现 使用原粹树脂")
mouse_move(p_left + datas[idex]["x"], p_top + datas[idex]["y"])
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.5)
continue
isIn, idex = find_dict_index(datas, 'label', '继续挑战')
if isIn or self.find_jixutiaozhan(hwnd, 953,923,1432,1065, p_left, p_top): # 路过的顺手捡了
if idex!=None:
if datas[idex]['sim'] > 0.75:
logger.info("发现 继续挑战")
mouse_move(p_left + 1178, p_top + 1006)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.5)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(2)
# 判断弹窗是否有树脂不够提醒 如果发现 则点击退出返回 阶段4走起
if self.find_shuzhibugou2(hwnd, left, top, right, bottom, p_left, p_top):
pyautogui.keyDown('esc')
time.sleep(0.1)
pyautogui.keyUp('esc')
time.sleep(1)
state.状态_已经有寻路了 = False
logger.info("没体力了 成功退出副本!")
time.sleep(10)
state.状态_需重新传送 = False
return True
else:
isinF = False
阶段 = 0
break
else:
logger.info("发现 继续挑战")
mouse_move(p_left + 1178, p_top + 1006)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.5)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(2)
# 判断弹窗是否有树脂不够提醒 如果发现 则点击退出返回 阶段4走起
if self.find_shuzhibugou2(hwnd, left, top, right, bottom, p_left, p_top):
pyautogui.keyDown('esc')
time.sleep(0.1)
pyautogui.keyUp('esc')
time.sleep(1)
state.状态_已经有寻路了 = False
logger.info("没体力了 成功退出副本!")
time.sleep(10)
state.状态_需重新传送 = False
return True
else:
isinF = False
阶段 = 0
break
if time.time() - 计时_阶段3_开始时间 > 30:
rand_cz = random.randint(0, 4)
if rand_cz == 0:
pyautogui.keyDown('x')
time.sleep(0.3)
pyautogui.keyUp('x')
pyautogui.keyUp('w')
pyautogui.keyDown('d')
time.sleep(0.5)
pyautogui.keyUp('d')
pyautogui.keyDown('w')
elif rand_cz == 1:
pyautogui.keyDown('x')
time.sleep(0.3)
pyautogui.keyUp('x')
pyautogui.keyUp('w')
pyautogui.keyDown('a')
time.sleep(0.5)
pyautogui.keyUp('a')
pyautogui.keyDown('w')
elif rand_cz == 2:
pyautogui.keyDown('x')
time.sleep(0.3)
pyautogui.keyUp('x')
elif rand_cz == 3:
pyautogui.keyDown('x')
time.sleep(0.3)
pyautogui.keyUp('x')
pyautogui.keyUp('w')
pyautogui.keyDown('s')
time.sleep(3)
pyautogui.keyUp('s')
pyautogui.keyDown('w')
else:
pyautogui.keyDown('space')
time.sleep(0.3)
pyautogui.keyDown('space')
计时_阶段3_开始时间 = time.time()
except:
logger.error(traceback.format_exc())
time.sleep(0.5)
state.状态_已经有寻路了 = False
logger.info("刷副本全部完成!")
def run_chuansongjiaoben(self, jiaoben,cbb_address="手动录制"):
global new_msg
try:
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
if state.状态_循环开关 == False:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
logger.info("强制退出!")
return False
if self.get_window_handle_at_mouse_position() == hwnd:
set_window_activate(hwnd)
time.sleep(0.2)
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
p_left = rect[0] + w_p
p_top = rect[1] + h_p
set_window_activate(hwnd)
if cbb_address=="手动录制":
kkk = self.处理异常情况(hwnd, 0, 0, 1920, 1080, p_left, p_top)
if kkk == 0:
logger.info("在意外界面 超时!")
time.sleep(3)
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
return False
elif kkk == -1:
logger.info("全军覆没了!")
time.sleep(3)
state.状态_需重新传送 = True
state.状态_已经有寻路了 = False
return False
# 临时发送消息给指定客户端
message = f"解析脚本#@@#{jiaoben}"
asyncio.run_coroutine_threadsafe(send_to_client(-1, message), loop)
time.sleep(2)
state.状态_是否回放中 = True
asyncio.run_coroutine_threadsafe(send_to_client(-1, "脚本执行#@@#1"), loop)
while True:
try:
time.sleep(0.1)
if state.状态_是否回放中 == False:
state.状态_需重新传送 = False
return True
if state.状态_循环开关 == False:
state.状态_需重新传送 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
return False
if self.get_window_handle_at_mouse_position() != hwnd:
# 激活hwnd
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
time.sleep(0.2)
except:
time.sleep(0.5)
except:
logger.error(traceback.format_exc())
state.状态_需重新传送 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
return False
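# run_jiaoben: plays back an arbitrary recorded script. With is_auto_f enabled it keeps
# template-matching the F-interaction prompt during playback and, after ruling out dialogue
# and cooking prompts, presses F to pick things up along the way.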
def run_jiaoben(self,jiaoben,is_auto_f):
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# F交互框位置
left_dh, top_dh, right_dh, bottom_dh = [1161, 508, 1319, 572] # 替换成你实际的区域坐标
p_left = rect[0] + w_p
p_top = rect[1] + h_p
set_window_activate(hwnd)
# 判断是否卡在了对话界面
left, top, right, bottom = 0, 0, 1920, 1080 # 替换成你实际的区域坐标
kkk = self.处理异常情况(hwnd, left, top, right, bottom, p_left, p_top)
if kkk == 0:
logger.info("在意外界面 超时!")
time.sleep(3)
state.状态_需重新传送 = False
return False
elif kkk == -1:
logger.info("全军覆没了!")
time.sleep(3)
state.状态_需重新传送 = True
return False
if state.状态_循环开关 == False:
logger.info("强制退出!")
state.状态_已经有寻路了 = False
return False
message = f"解析脚本#@@#{jiaoben}"
asyncio.run_coroutine_threadsafe(send_to_client(-1, message), loop)
time.sleep(2)
asyncio.run_coroutine_threadsafe(send_to_client(-1, "脚本执行#@@#1"), loop)
state.状态_是否回放中 = True
try:
while True:
try:
time.sleep(0.1)
if state.状态_是否回放中 == False:
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
logger.info("脚本运行完毕!")
return True
if state.状态_循环开关 == False:
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
logger.info("强制退出!")
return False
if self.get_window_handle_at_mouse_position() != hwnd:
# 激活hwnd
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
time.sleep(0.2)
if is_auto_f:
if self.find_f(hwnd, 1050, 424, 1187, 648, p_left, p_top):
time.sleep(0.2)
if not self.find_duihuahuopengren(hwnd, left_dh, top_dh, right_dh, bottom_dh, p_left,
p_top) or self.find_pianpianhua(hwnd, left_dh, top_dh,
right_dh, bottom_dh, p_left,
p_top):
logger.info(f"顺手捡东西!")
state.状态_有目标 = True
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
time.sleep(0.1)
pyautogui.keyDown('f')
time.sleep(0.1)
pyautogui.keyUp('f')
except:
time.sleep(0.5)
except:
logger.error(traceback.format_exc())
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
return False
def run_jixing(self):
'''
领取纪行奖励
:return:
'''
global new_msg
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom = 0, 0, 1920, 1080 # 替换成你实际的区域坐标
p_left = rect[0] + w_p
p_top = rect[1] + h_p
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
time.sleep(0.2)
paimeng_img = cv2.imdecode(np.fromfile(file="./datas/派蒙.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
big_img = screenshot(hwnd, left, top, right, bottom, None)
if template_matching(big_img, paimeng_img, mask=None) == []:
pyautogui.keyDown("esc")
time.sleep(0.1)
pyautogui.keyUp("esc")
time.sleep(2)
pyautogui.keyDown('alt')
if not self.click_jixing(hwnd, left, top, right, bottom, p_left, p_top): logger.info(
"没有纪行要领取");pyautogui.keyUp('alt');return
time.sleep(1)
pyautogui.keyUp('alt')
time.sleep(2)
mouse_move(p_left + 962, p_top + 52)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
if not self.click_jixing_yijianlingqu(hwnd, left, top, right, bottom, p_left, p_top): logger.info(
"领取纪行任务经验失败");return
time.sleep(1)
mouse_move(p_left + 863, p_top + 55)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(1)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(1)
if not self.click_jixing_yijianlingqu(hwnd, left, top, right, bottom, p_left, p_top): logger.info(
"没有奖励能领取");return
time.sleep(1)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(1)
pyautogui.keyDown("esc")
time.sleep(0.1)
pyautogui.keyUp("esc")
def run_huanjue(self,path_list=[],zhanchagn="1"):
global new_msg
state.状态_检测中 = False
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom = 0, 0, 1920, 1080 # 替换成你实际的区域坐标
p_left = rect[0] + w_p
p_top = rect[1] + h_p
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
time.sleep(0.2)
kkk = self.处理异常情况(hwnd, left, top, right, bottom, p_left, p_top)
if kkk == 0:
logger.info("在意外界面 超时!")
time.sleep(3)
state.状态_检测中 = False
state.状态_已经有寻路了=False
state.状态_需重新传送 = False
return False
elif kkk == -1:
logger.info("全军覆没了!")
time.sleep(3)
state.状态_检测中 = False
state.状态_已经有寻路了 = False
state.状态_需重新传送 = True
return False
#按了后看有没有打开成功
n=0
while True:
if n==0:
n=0
kkk = self.处理异常情况(hwnd, left, top, right, bottom, p_left, p_top)
if kkk == 0:
logger.info("在意外界面 超时!")
time.sleep(3)
state.状态_检测中 = False
state.状态_已经有寻路了 = False
state.状态_需重新传送 = False
return False
elif kkk == -1:
logger.info("全军覆没了!")
time.sleep(3)
state.状态_检测中 = False
state.状态_已经有寻路了 = False
state.状态_需重新传送 = True
return False
pyautogui.keyDown("x")
time.sleep(0.2)
pyautogui.keyUp("x")
pyautogui.keyDown("l")
time.sleep(0.2)
pyautogui.keyUp("l")
time.sleep(2)
elif n>=20:
n = 0
time.sleep(1)
if state.状态_循环开关 == False:
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
logger.info("强制退出!")
return False
if self.get_window_handle_at_mouse_position() != hwnd:
# 激活hwnd
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
time.sleep(0.2)
if state.状态_全局暂停:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
time.sleep(2)
continue
if self.click_kuaisubiandui(hwnd, left, top, right, bottom , p_left, p_top):
time.sleep(2)
break
n += 1
#加载角色图片数据
img_datas=[]
point_list=[[96,189],
[255,198],
[404,200],
[568,190],
]
for item in path_list:
img_datas.append( cv2.imdecode(np.fromfile(file=item, dtype=np.uint8),cv2.IMREAD_UNCHANGED))
#将历史角色清空掉
for point in point_list:
#点击对应坑位
mouse_move(p_left +point[0],p_top +point[1])
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.2)
if not self.find_baocunpeizhi(hwnd, left, top, right, bottom, p_left, p_top):
break
logger.info("清空角色成功!")
# 循环选择角色
a = 0
for i,img in enumerate(img_datas):
ret =False
while not ret:
logger.info(str(a))
if state.状态_循环开关 == False:
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
logger.info("强制退出!")
return False
if self.get_window_handle_at_mouse_position() != hwnd:
# 激活hwnd
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
time.sleep(0.2)
if state.状态_全局暂停:
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
time.sleep(2)
continue
ret= self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, f"勾选 角色{i+1}", 1,
1,
threshold=0.76)
if ret:
time.sleep(1)
break
if 5<a<11:
pyautogui.moveTo(p_left +342,p_top +415)
pyautogui.dragTo(p_left +346,p_top +833, duration=0.5, tween=pyautogui.easeOutSine)
else:
pyautogui.moveTo(p_left +346, p_top +833)
pyautogui.dragTo(p_left +342, p_top +415, duration=0.5, tween=pyautogui.easeOutSine)
a+=1
if a==11:
a=0
time.sleep(1)
#保存配置
mouse_move(p_left +453,p_top +1016)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(5)
if state.状态_循环开关 == False:
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
logger.info("强制退出!")
return False
pyautogui.keyDown("esc")
time.sleep(0.1)
pyautogui.keyUp("esc")
time.sleep(2)
pyautogui.keyDown(zhanchagn)
time.sleep(0.1)
pyautogui.keyUp(zhanchagn)
time.sleep(2)
state.状态_需重新传送 = False
state.状态_已经有寻路了 = False
return True
def 处理异常情况(self,hwnd,left,top,right,bottom,p_left,p_top):
'''
:param hwnd:
:param left:
:param top:
:param right:
:param bottom:
:param p_left:
:param p_top:
:return:
'''
paimeng_img = cv2.imdecode(np.fromfile(file="./datas/派蒙.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
# 判断是否卡在了对话界面
for nnn in range(120):
if state.状态_循环开关 == False:
logger.info("强制退出!")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_是否回放中 = False
state.状态_检测中=False
asyncio.run_coroutine_threadsafe(send_to_client(-1, "是否回放#@@#假"), loop)
state.状态_需重新传送 = False
return 1
big_img = screenshot(hwnd, left, top, right, bottom, None)
rrrr = template_matching(big_img, paimeng_img, mask=None)
if rrrr == []:
rdd = random.randint(1, 4)
if rdd == 1:
pyautogui.keyDown("x")
time.sleep(0.2)
pyautogui.keyUp("x")
mouse_move(p_left + 1397, p_top + 804)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.2)
elif rdd == 2:
pyautogui.keyDown("esc")
time.sleep(0.1)
pyautogui.keyUp("esc")
time.sleep(1)
else:
pyautogui.keyDown('space')
time.sleep(0.1)
pyautogui.keyUp('space')
logger.info("未知界面 按esc 和 点击对话! " + str(nnn))
# 判断是否死亡
ret_sw = self.find_zhuyidiren(hwnd, left, top, right, bottom, p_left, p_top)
if ret_sw:
state.状态_检测中 = False
return -1
else:
if rrrr != []:
return 1
time.sleep(1)
return 0
def find_img_and_click(self, img, hwnd, left, top, right, bottom, p_left, p_top, label="", times=10, step=1.0,
isClick=True, threshold=0.8, keepKey=None, mask=None, double_click=False):
for _ in range(times):
time.sleep(step)
if state.状态_循环开关 == False:
return False
if keepKey != None:
pyautogui.keyDown(keepKey)
big_img = screenshot(hwnd, left, top, right, bottom, None)
res = template_matching(big_img, img, mask=mask, threshold=threshold)
for result in res:
result_post = [result["result"][0], result["result"][1]]
logger.info(label)
if isClick:
mouse_move(p_left + result_post[0], p_top + result_post[1])
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
if double_click:
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
return True
return False
def find_img_and_click2(self, img, big_img, p_left, p_top, label="",
isClick=True, threshold=0.75, keepKey=None, mask=None, double_click=False):
if state.状态_循环开关 == False:
return False
if keepKey != None:
pyautogui.keyDown(keepKey)
try:
res = template_matching(big_img, img, mask=mask, threshold=threshold)
for result in res:
result_post = [result["result"][0], result["result"][1]]
logger.info(label)
# if state.开关_是否展预测结果:
# # 转换为 NumPy 数组
# pts = np.array((result['rectangle'][0], result['rectangle'][1], result['rectangle'][3],
# result['rectangle'][2]), np.int32)
# cv2.circle(big_img, result_post, 10, (0, 0, 255), -1)
# cv2.polylines(big_img, [pts], True, (0, 255, 0), 5)
# cv2.imwrite("./output.jpg", big_img)
# state.QT_信号.mysig_show_xunlu.emit()
if isClick:
mouse_move(p_left + result_post[0], p_top + result_post[1])
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
if double_click:
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
return True
except:
logger.error(traceback.format_exc())
return False
def find_imgs_and_click(self, imgs, hwnd, left, top, right, bottom, p_left, p_top, label="", times=10, step=1.0,
isClick=True, threshold=0.8, isMask=False):
for _ in range(times):
time.sleep(step)
big_img = screenshot(hwnd, left, top, right, bottom, None)
for img in imgs:
if isMask:
mask = img[:, :, 3] # 提取透明度通道作为掩码
else:
mask = None
ret = self.find_img_and_click2(img, big_img, p_left, p_top, label=label, isClick=isClick,
threshold=threshold, mask=mask)
if ret:
return True
return False
def find_f_keep_going(self, hwnd, left, top, right, bottom, p_left, p_top, isFanxiang):
img = cv2.imdecode(np.fromfile(file="./datas/F.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
if isFanxiang:
keepKey = 's'
else:
keepKey = 'w'
if self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 F", 1000, 0.05, False,
keepKey=keepKey):
if isFanxiang:
pyautogui.keyUp('s')
else:
pyautogui.keyUp('w')
pyautogui.keyDown('f')
time.sleep(0.2)
pyautogui.keyUp('f')
time.sleep(0.3)
pyautogui.keyDown('f')
time.sleep(0.2)
pyautogui.keyUp('f')
return True
else:
return False
def click_danrentiaozhan(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/单人挑战.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 单人挑战")
def click_dimaiyichang(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/地脉异常.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 地脉异常", 1, 0.01,
threshold=0.76)
def click_jixing(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/纪行.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
mask = img[:, :, 3] # 提取透明度通道作为掩码
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 纪行", 10, 1,
threshold=0.76, mask=mask, double_click=True)
def click_jixing_renwu(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/纪行_任务.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
mask = img[:, :, 3] # 提取透明度通道作为掩码
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 纪行_任务", 10, 1,
threshold=0.6, mask=mask)
def click_jixing_yijianlingqu(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/纪行_一键领取.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 纪行_一键领取", 10, 1,
threshold=0.76)
def click_kuaisubiandui(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/快速编队.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 快速编队", 1, 1,
threshold=0.76)
def find_baocunpeizhi(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/保存配置.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "查找 保存配置", 1, 1,
threshold=0.76,isClick=False)
def click_jixing_jixing(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/纪行_纪行.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
mask = img[:, :, 3] # 提取透明度通道作为掩码
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 纪行_纪行", 10, 1,
threshold=0.76, mask=mask)
def click_kaishitiaozhan(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/开始挑战.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 开始挑战")
def find_shuzhibugou(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/树脂不够.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
mask = img[:, :, 3] # 提取透明度通道作为掩码
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 树脂不够", 3,
threshold=0.65, isClick=False, mask=mask)
def find_shuzhibugou2(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/收取完成秘境.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
mask = img[:, :, 3] # 提取透明度通道作为掩码
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "发现 树脂不够", 3,
threshold=0.65, isClick=False, mask=mask)
def find_zidongtuichu(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/自动退出.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
mask = img[:, :, 3] # 提取透明度通道作为掩码
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "发现 自动退出", 1,
threshold=0.65, isClick=False, mask=mask,step=0.001)
def find_jixutiaozhan(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/继续挑战.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
mask = None  # 这里不使用掩码
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "发现 继续挑战", 1,
threshold=0.65, isClick=False, mask=mask,step=0.001)
def find_f(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/f.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
mask =None# img[:, :, 3] # 提取透明度通道作为掩码
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "寻找 F", 1,step=0.001,
threshold=0.75, isClick=False, mask=mask)
def find_zhuyidiren(self, hwnd, left, top, right, bottom, p_left, p_top):
img = cv2.imdecode(np.fromfile(file="./datas/注意敌人.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
return self.find_img_and_click(img, hwnd, left, top, right, bottom, p_left, p_top, "点击 注意敌人", 2,
step=0.01,
threshold=0.9, isClick=True)
def find_duihuahuopengren(self, hwnd, left, top, right, bottom, p_left, p_top):
global imgs_duihuahuopengren
return self.find_imgs_and_click(imgs_duihuahuopengren, hwnd, left, top, right, bottom, p_left, p_top, "发现 对话或烹饪", 1,
step=0.001, isClick=False)
def find_pianpianhua(self, hwnd, left, top, right, bottom, p_left, p_top):
global imgs_pianpianhua
return self.find_imgs_and_click(imgs_pianpianhua, hwnd, left, top, right, bottom, p_left, p_top, "发现 骗骗花", 1,
step=0.001, isClick=False)
def distance_to_target(self, d):
h = float(d["points"][1][0] - d["points"][0][0])
w = float(d["points"][1][1] - d["points"][0][1])
# 计算距离的平方,这里使用欧几里得距离的平方作为衡量标准
dist = (d["x"] - self.centre_point[0]) ** 2 + (d["y"] - self.centre_point[1]) ** 2
angle = self.get_angle(self.centre_point, [d["x"], d["y"]])
if angle >= 300 or angle <= 60:
dist *= 999
if w < 60:
dist *= 0.1
return dist + w * h
def setAlign(self, angle_now, angle_target, fault=6):
angle_diff = abs(angle_now - angle_target)
if angle_diff <= fault:
return False
angle_diff = int(angle_diff % 360)
if angle_diff > 180:
fangxiang = -1
angle_diff = 360 - angle_diff
else:
fangxiang = 1
if abs(angle_diff)< fault:
return True
if angle_diff >= 100:
mouse_moveR(int(fangxiang * 6 * angle_diff), 0)
else:
mouse_moveR(int(fangxiang * 5 * angle_diff), 0)
time.sleep(0.1)
return True
def get_angle(self, start_pos, next_pos):
# 计算方向向量
direction_x = next_pos[0] - start_pos[0]
direction_y = next_pos[1] - start_pos[1]
# 使用反正切函数计算弧度角度值
angle_rad = math.atan2(direction_y, direction_x)
# 将弧度角度值转换为以北方向为0度的角度
angle_deg = math.degrees(angle_rad)
# 将角度值转换为顺时针方向
angle_deg = int((angle_deg + 360 + 90) % 360)
return angle_deg
def get_window_handle_at_mouse_position(self, ):
active_hwnd = ctypes.windll.user32.GetForegroundWindow()
return active_hwnd
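上面的 get_angle 把 atan2 得到的数学角度换算成"以正北为 0 度、顺时针增加"的角度,和小地图上的朝向约定一致。下面是一个独立的小验证脚本(非项目代码,仅演示这个换算关系):
import math

def get_angle(start_pos, next_pos):
    # 与上文同样的换算:atan2 求出弧度,转成角度后 +90,使正北为 0 度、顺时针增加
    angle_rad = math.atan2(next_pos[1] - start_pos[1], next_pos[0] - start_pos[0])
    return int((math.degrees(angle_rad) + 360 + 90) % 360)

# 屏幕坐标系 y 轴向下,因此目标在正上方(y 变小)时结果应为 0 度(正北)
print(get_angle((100, 100), (100, 50)))   # 0   正北
print(get_angle((100, 100), (150, 100)))  # 90  正东
print(get_angle((100, 100), (100, 150)))  # 180 正南
print(get_angle((100, 100), (50, 100)))   # 270 正西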
1、添加模型推理图片位置
vi lanrenonnx.py
import os
import random
import time
import onnxruntime as ort
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
def put_chinese_text_on_image(img, label, position, color=[0,255,0],font_path= 'Arial.Unicode.ttf', font_size=15, font_thickness=5):
"""
在OpenCV图像上绘制中文字符串。
参数:
img -- OpenCV图像。
label -- 要绘制的字符串。
position -- 字符串的起始位置 (x, y) 元组。
color -- 字体颜色,以BGR格式给出。
font_path -- 支持中文的字体文件的路径。
font_size -- 字体大小
font_thickness -- 字体 加粗
返回:
带有中文字符串的图像。
"""
# 转换图像以便使用PIL
pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(pil_img)
# 加载自定义字体
pil_font = ImageFont.truetype(os.path.dirname(__file__) + "\\"+font_path, font_size) # 字体大小可能需要调整以匹配OpenCV的font_scale
# 获取文本的宽度和高度
text_width, text_height = draw.textbbox((0, 0), label, font=pil_font)[2:]
# 计算文本的起始位置,确保它不会超出图像边界
x, y = position
y_adjusted = max(y - text_height, 0)
position = (x, y_adjusted)
# 使用PIL绘制文本
draw.text(position, label, fill=tuple(color), font=pil_font,width=font_thickness)
# 将PIL图像转换回OpenCV图像
img_with_text = cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
return img_with_text
class LanRenOnnxYolov():
def __init__(self,weights='yolov5l_320.onnx',model_h=320, model_w=320, thred_nms=0.4, thred_cond=0.4,providers=["CUDAExecutionProvider",'CPUExecutionProvider'],dic_labels={}):
"""
yolov onnx推理
providers: [] ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']
dic_labels: {0: 'person', 1: 'bicycle'}
"""
self.weights=weights
self.model_h=model_h
self.model_w=model_w
self.thred_nms=thred_nms
self.thred_cond=thred_cond
self.colors=[[random.randint(140, 255) for _ in range(3)] for color in range(len(dic_labels))]
self.openfile_name_model = weights # 模型名称
# self.so = ort.SessionOptions() # 树莓派上保留以下两段代码,注释下面那行代码
# self.net = ort.InferenceSession(self.openfile_name_model, self.so)
# ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']
try:
self.net = ort.InferenceSession(self.openfile_name_model,
providers=providers) # 在树莓派上这里不需指定推理设备
p=self.net.get_providers()
print("执行提供者:",p,)
print("GPU加速:",str(p).find("CUDA")!=-1 or str(p).find("Tensorrt")!=-1 )
# 标签字典
self.dic_labels = dic_labels
big_img = np.zeros((1080, 1920, 3), dtype=np.uint8)
self.detect(big_img )
old_time = time.time()
self.detect(big_img )
print("检测帧率:", 1000 // int((time.time() - old_time) * 1000))
except:
print("CUDA加速失败,使用CPU推理")
providers=['CPUExecutionProvider']
self.net = ort.InferenceSession(self.openfile_name_model,
providers=providers) # 在树莓派上这里不需指定推理设备
p = self.net.get_providers()
print("执行提供者:", p, )
print("GPU加速:", str(p).find("CUDA") != -1 or str(p).find("Tensorrt") != -1)
# 标签字典
self.dic_labels = dic_labels
big_img = np.zeros((1080, 1920, 3), dtype=np.uint8)
self.detect(big_img)
old_time = time.time()
self.detect(big_img)
print("检测帧率:", 1000 // int((time.time() - old_time) * 1000))
# 标注目标
def plot_one_box(self,x, img, color=None, label=None, line_thickness=None):
"""
description: Plots one bounding box on image img,
this function comes from YoLov5 project.
param:
x: a box likes [x1,y1,x2,y2]
img: a opencv image object
color: color to draw rectangle, such as (0,255,0)
label: str
line_thickness: int
return:
img
"""
tl = (
line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
) # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
#支持中文的写法
img = put_chinese_text_on_image(img, label, (c1[0], c1[1] - 2), [color[2],color[1],color[0]],font_size= t_size[1])
#不支持中文
# cv2.putText(
# img,
# label,
# (c1[0], c1[1] - 2),
# 0,
# tl / 3,
# [225, 255, 255],
# thickness=tf,
# lineType=cv2.LINE_AA,
# )
return img
# 极大值抑制
def post_process_opencv(self,outputs, img_h, img_w,):
conf = outputs[:, 4].tolist()
c_x = outputs[:, 0] / self.model_w * img_w
c_y = outputs[:, 1] / self.model_h * img_h
w = outputs[:, 2] / self.model_w * img_w
h = outputs[:, 3] / self.model_h * img_h
p_cls = outputs[:, 5:]
if len(p_cls.shape) == 1:
p_cls = np.expand_dims(p_cls, 1)
cls_id = np.argmax(p_cls, axis=1)
p_x1 = np.expand_dims(c_x - w / 2, -1)
p_y1 = np.expand_dims(c_y - h / 2, -1)
p_x2 = np.expand_dims(c_x + w / 2, -1)
p_y2 = np.expand_dims(c_y + h / 2, -1)
areas = np.concatenate((p_x1, p_y1, p_x2, p_y2), axis=-1)
areas = areas.tolist()
ids = cv2.dnn.NMSBoxes(areas, conf, self.thred_cond, self.thred_nms)
if len(ids) > 0:
return np.array(areas)[ids], np.array(conf)[ids], cls_id[ids]
else:
return [], [], []
# 推理
def detect(self,img_np,save_path=None,plot_box=True):
'''
预测
'''
img0 = img_np.copy()
# 图像预处理
img = cv2.resize(img0, [self.model_w, self.model_h], interpolation=cv2.INTER_AREA) # 缩放
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # 格式转换
img = img.astype(np.float32) / 255.0 # 归一化
blob = np.expand_dims(np.transpose(img, (2, 0, 1)), axis=0) # 维度转换
# 模型推理
outs = self.net.run(None, {self.net.get_inputs()[0].name: blob})[0].squeeze(axis=0)
# 输出坐标矫正
# outs = cal_outputs(outs, nl, na, model_w, model_h, anchor_grid, stride)
# 检测框计算
img_h, img_w, _ = np.shape(img0)
boxes, confs, ids = self.post_process_opencv(outs, img_h, img_w)
res_loc=[]
for box, score, id in zip(boxes, confs, ids):
# 标签
label = '%s:%.2f' % (self.dic_labels[id], score)
# 坐标转换
xyxy=box.astype(np.int16)
#画框框
if plot_box or save_path != None:
img0=self.plot_one_box(xyxy, img0, label=label, line_thickness=None,color= self.colors[id])
# 数据保存
res_loc.append({"label": self.dic_labels[id], "x": int(xyxy[0]) + (int(xyxy[2]) - int(xyxy[0])) // 2,
"y": int(xyxy[1]) + (int(xyxy[3]) - int(xyxy[1])) // 2, "sim": float(f"{score:.2f}"),
"points": ((xyxy[0],xyxy[1]), (xyxy[2],xyxy[3]))})
if save_path != None:
cv2.imwrite(save_path, img0)
if img0.shape[2]==4:
return res_loc,cv2.cvtColor(img0, cv2.COLOR_BGRA2BGR) # 格式转换
return res_loc,img0
if __name__ == '__main__':
image_path="D:\\pythonProjects\\yuanshenAI\\datasets\\yuanshen\\images\\17141983915923.jpg"
weights="./datas/yolov5l_320.onnx"
model_h=320
model_w=320
dic_labels={0:"玩家",1:"采集物",2:"生命树",3:"开关",4:"怪物",5:"提示关闭",6:"使用原粹树脂",7:"使用浓缩树脂",
8:"关闭",9:"继续挑战",10:"退出秘境",11:"副本门框",12:"副本打完了",13:"捡东西",14:"副本楼梯",
15:"往下落",16:"矿石",17:"往上跳",18:"交互对话",19:"采集物-风",20:"采集物-冰",21:"确定",
22:"取消",23:"返回",24:"被控了",25:"在水面",26:"宠物"}
big_img = cv2.imdecode(np.fromfile(file=image_path, dtype=np.uint8), cv2.IMREAD_COLOR) # 加载大图
yolov=LanRenOnnxYolov(weights=weights,model_w=model_w,model_h=model_h,dic_labels=dic_labels)
old_time=time.time()
res_loc,img=yolov.detect(big_img,save_path="test.jpg")
print((time.time()-old_time)*1000,res_loc)
old_time = time.time()
res_loc, img = yolov.detect(big_img, save_path=None, plot_box=True)
print((time.time() - old_time) * 1000, res_loc)
old_time = time.time()
res_loc, img = yolov.detect(big_img, save_path=None, plot_box=True)
print((time.time() - old_time) * 1000, res_loc)
old_time = time.time()
res_loc, img = yolov.detect(big_img, save_path=None, plot_box=True)
print((time.time() - old_time) * 1000, res_loc)
old_time = time.time()
res_loc, img = yolov.detect(big_img, save_path=None, plot_box=True)
print((time.time() - old_time) * 1000, res_loc)
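detect() 返回的 res_loc 是一个字典列表,每项包含 label、中心点 x/y、置信度 sim 和左上右下坐标 points。下面是一个消费该结果的小示例(标签取自上面的 dic_labels,0.5 的过滤阈值只是演示用的假设值):
# 承接上文:res_loc, img = yolov.detect(big_img)
monsters = [d for d in res_loc if d["label"] == "怪物" and d["sim"] >= 0.5]
# 按离画面中心 (960, 540) 的距离排序,取最近的一个作为目标
monsters.sort(key=lambda d: (d["x"] - 960) ** 2 + (d["y"] - 540) ** 2)
if monsters:
    target = monsters[0]
    print("最近的怪物:", target["x"], target["y"], "置信度:", target["sim"])
else:
    print("画面里没有检测到怪物")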
2、循环截图功能
vi screenshot.py
# -*- coding: utf-8 -*-
import os
import random
import time
import traceback
from ctypes import windll
#import cv2
import numpy as np
import win32gui
import win32ui
import win32con
from PIL import Image
from logger_module import logger
def screenshot(hwnd, left=0, top=0, right=0, bottom=0, filename=None):
try:
#old_time=time.time()
width = right-left
height = bottom - top
# 判断窗口是否可见
if not win32gui.IsWindowVisible(hwnd):
return False
# 创建设备描述表
hwnd_dc = win32gui.GetDC(hwnd) #GetWindowDC
mfc_dc = win32ui.CreateDCFromHandle(hwnd_dc)
save_dc = mfc_dc.CreateCompatibleDC()
# 创建位图对象
save_bitmap = win32ui.CreateBitmap()
save_bitmap.CreateCompatibleBitmap(mfc_dc, width, height)
# 将位图对象绑定到设备描述表
save_dc.SelectObject(save_bitmap)
# result = windll.user32.PrintWindow(hwnd, save_dc.GetSafeHdc(),1) #0 1 或者3 3没有透明通道信息
# if result == 0:
# print("PrintWindow failed")
# return False
# # 将截图保存到位图对象中
save_dc.BitBlt((0, 0), (width, height), mfc_dc, (left, top), win32con.SRCCOPY)#win32con.CAPTUREBLT win32con.SRCCOPY
# 将位图对象转换为OpenCV图像
bmp_info = save_bitmap.GetInfo()
bmp_str = save_bitmap.GetBitmapBits(True)
img = np.frombuffer(bmp_str, dtype='uint8').reshape((bmp_info['bmHeight'], bmp_info['bmWidth'], 4))
#img = img[top:bottom, left:right]
# 测试透明通道是否截到
# img = cv2.split(img)
# # 提取Alpha通道
# img = img[3]
# cv2.imshow('Alpha Channel', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if filename is not None:
# 保存位图对象到文件
img_pil = Image.fromarray(img[..., [2, 1, 0]])
img_pil.save(filename, format='JPEG', quality=90)
# 删除对象,释放资源
save_dc.DeleteDC()
win32gui.ReleaseDC(hwnd, hwnd_dc)
win32gui.DeleteObject(save_bitmap.GetHandle())
#print(time.time()-old_time)
return img
except :
logger.error(traceback.format_exc())
return False
if __name__ == '__main__':
try:
step=int(input("多少秒截一张图?输入数字按回车确认:"))
except:
step=2
input("请将游戏设置为1920*1080分辨率,然后按回车开始截图")
# 获取窗口句柄
hwnd = win32gui.FindWindow("UnityWndClass", "原神") # 替换成你实际的窗口句柄
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom = 0, 0, 1920, 1080 # 替换成你实际的区域坐标
#判断是否有这个目录 没有就创建
if not os.path.exists("./原神截图"):
os.mkdir("./原神截图")
while True:
time.sleep(step)
filename = f"./原神截图/{int(time.time())}{random.randint(1000,9999)}.jpg"
ret=screenshot(hwnd, left, top, right, bottom, filename=filename)
print("截图成功保存在:",filename)
3、添加视角分析
vi anglerecognition.py
# -*- coding: utf-8 -*-
import copy
import time
import cv2
import os
import numpy as np
def get_view(small_img,radius=70):
"""
返回当前视野角度 感谢网友飞宇提供的识别方法
Returns:以正北方向为0度计算
"""
# radius = 70 # 定义截屏的宽度和长度
# # 获取窗口句柄
# hwnd = win32gui.FindWindow("UnityWndClass", "原神") # 替换成你实际的窗口句柄
#
#
# small_img = screenshot(hwnd,(98+radius,55+radius), radius=radius)
#
#
channels = cv2.split(small_img)
# 提取Alpha通道
alpha3 = channels[3] # 索引为3,因为Alpha通道是第四个通道
# cv2.imshow('Alpha Channel', alpha3)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# _, alpha = cv2.threshold(small_img, 10, 255, cv2.THRESH_BINARY) #这个是提取蓝色箭头图像
# 假设 gray_channel 是你的灰度图像
# 创建一个掩膜,将151-229之间的值设为255,其他设为0
mask = np.zeros_like(alpha3)
mask[alpha3 >= 151] = 255
mask[alpha3 < 151] = 0
mask[alpha3 > 229] = 0
img = np.zeros((radius * 2, radius * 2), dtype=np.uint8)
center = (radius, radius)
max_overlap = 0 # 最佳的重叠面积
max_angle = -1 # 最佳的角度
increment = 20 # 递增的角度,这里设置为20度
for angle in range(0, 360, increment):
img_copy = img.copy()
# 画一个扇形
cv2.ellipse(img_copy, center, (radius, radius), 225 + angle, 0, 90, 255, -1)
overlap = np.logical_and(img_copy, mask).sum()
if overlap > max_overlap:
max_overlap = overlap
max_angle = angle
start_angle = max_angle - 10
if start_angle < 0:
start_angle = 0
end_angle = start_angle + 10
else:
end_angle = start_angle + 20
for angle in range(start_angle, end_angle, 1):
img_copy = img.copy()
# 画一个扇形
cv2.ellipse(img_copy, center, (radius, radius), 225 + angle, 0, 90, 255, -1)
overlap = np.logical_and(img_copy, mask).sum()
if overlap > max_overlap:
max_overlap = overlap
max_angle = angle
# print(f'最匹配角度:{max_angle},重叠面积:{max_overlap}')
# print(f'用时:{time.time() - old_time}')
#cv2.destroyAllWindows()
return max_angle
def get_view2(small_img,radius=70):
"""
返回当前视野角度 感谢网友飞宇提供的识别方法
Returns:以正北方向为0度计算
"""
# radius = 70 # 定义截屏的宽度和长度
# # 获取窗口句柄
# hwnd = win32gui.FindWindow("UnityWndClass", "原神") # 替换成你实际的窗口句柄
#
#
# small_img = screenshot(hwnd,(98+radius,55+radius), radius=radius)
#
#
channels = cv2.split(small_img)
# 提取Alpha通道
alpha3 = channels[3] # 索引为3,因为Alpha通道是第四个通道
#cv2.imshow('Alpha Channel', alpha3)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# _, alpha = cv2.threshold(small_img, 10, 255, cv2.THRESH_BINARY) #这个是提取蓝色箭头图像
# 假设 gray_channel 是你的灰度图像
# 创建一个掩膜,将9-190之间的值设为255,其他设为0
mask = np.zeros_like(alpha3)
mask[alpha3 >= 9] = 255
mask[alpha3 < 9] = 0
mask[alpha3 > 190] = 0
img = np.zeros((radius * 2, radius * 2), dtype=np.uint8)
center = (radius, radius)
max_overlap = 0 # 最佳的重叠面积
max_angle = -1 # 最佳的角度
increment = 20 # 递增的角度,这里设置为20度
for angle in range(0, 360, increment):
img_copy = img.copy()
# 画一个扇形
cv2.ellipse(img_copy, center, (radius, radius), 225 + angle, 0, 90, 255, -1)
overlap = np.logical_and(img_copy, mask).sum()
if overlap > max_overlap:
max_overlap = overlap
max_angle = angle
start_angle = max_angle - 10
if start_angle < 0:
start_angle = 0
end_angle = start_angle + 10
else:
end_angle = start_angle + 20
for angle in range(start_angle, end_angle, 1):
img_copy = img.copy()
# 画一个扇形
cv2.ellipse(img_copy, center, (radius, radius), 225 + angle, 0, 90, 255, -1)
overlap = np.logical_and(img_copy, mask).sum()
if overlap > max_overlap:
max_overlap = overlap
max_angle = angle
return max_angle
def pyramid_template_matching(image, template, mask=None):
'''
返回当前导航的角度
:param image:
:param template:
:param mask:
:return:
'''
result = image.copy()
# 创建掩码
if mask is not None:
mask = mask.astype(np.uint8)
# 金字塔层级
#pyramid_levels =1
best_angle = 0 # 最佳匹配角度
max_similarity = 0 # 最大相似度
best_res=0
best_rotated_template=""
h, w = template.shape[:2]
scaled_mask = mask.astype(np.uint8)
scaled_image=image
scaled_template=template
# for level in range(pyramid_levels):
# # 缩放大图和小图
# scaled_image = cv2.resize(image, (0, 0), fx=1 / (2 ** level), fy=1 / (2 ** level))
# scaled_template = cv2.resize(template, (0, 0), fx=1 / (2 ** level), fy=1 / (2 ** level))
# if mask is not None:
# scaled_mask = cv2.resize(mask, (0, 0), fx=1 / (2 ** level), fy=1 / (2 ** level))
# scaled_mask = scaled_mask.astype(np.uint8)
# 旋转模板并进行匹配
for angle in range(0, 360, 10):
rotated_mask = rotate_image(scaled_mask, angle)
rotated_mask[rotated_mask >1] = 255 # 将灰色也变成白色
rotated_template = rotate_image(scaled_template, angle)
#save_rotated_image(rotated_template, "./datas/img", f"{angle}_template")
#save_rotated_image(rotated_mask, "./datas/img",f"{angle}_mask")
res = cv2.matchTemplate(scaled_image, rotated_template, cv2.TM_CCORR_NORMED, mask=rotated_mask)
similarity = cv2.minMaxLoc(res)[1] # 获取匹配相似度
if similarity > max_similarity:
max_similarity = similarity
best_angle = angle
best_res= res
best_rotated_template = rotated_template
best_angle_bk= copy.copy(best_angle)
start_angle=best_angle_bk-10
if start_angle<0:
start_angle=0
end_angle=start_angle+10
else:
end_angle = start_angle + 20
for angle in range(start_angle,end_angle,1):
rotated_mask = rotate_image(scaled_mask, angle)
rotated_mask[rotated_mask > 1] = 255 # 将灰色也变成白色
rotated_template = rotate_image(scaled_template, angle)
res = cv2.matchTemplate(scaled_image, rotated_template, cv2.TM_CCORR_NORMED, mask=rotated_mask)
similarity = cv2.minMaxLoc(res)[1] # 获取匹配相似度
if similarity > max_similarity:
max_similarity = similarity
best_angle = angle
best_res = res
best_rotated_template=rotated_template
#min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(best_res)
#top_left = max_loc
#bottom_right = (top_left[0] + w, top_left[1] + h)
# 绘制角度
#cv2.putText(result, f"{int(best_angle)}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
# 在原始大图上绘制矩形框标记匹配区域
#cv2.rectangle(result, top_left, bottom_right, (0, 255, 0), 2)
return result,max_similarity,best_angle,best_rotated_template
def save_rotated_image(rotated_image, save_dir, index):
save_path = os.path.join(save_dir, f"{index}.png")
cv2.imwrite(save_path, rotated_image)
def rotate_image(image, angle):
'''
将图片旋转多少度
:param image:
:param angle:
:return:
'''
h, w = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, -angle, 1.0)
rotated_image = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0, 0))
return rotated_image
def template_matching(image, template, threshold=0.8,maxcnt=0,mask=None):
'''
模板匹配 多个
:param image:
:param template:
:param mask:
:return:
'''
method = cv2.TM_CCOEFF_NORMED
# 创建掩码
if mask is not None:
mask = mask.astype(np.uint8)
scaled_mask = mask.astype(np.uint8)
else:
scaled_mask=mask
scaled_image=image
scaled_template=template
h, w = scaled_template.shape[:2]
res = cv2.matchTemplate(scaled_image, scaled_template, method, mask=scaled_mask)
result = []
while True:
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
if max_val < threshold or max_val == np.inf:
break
middle_point = [top_left[0] + w // 2, top_left[1] + h // 2]
result.append(dict(
result=middle_point,
rectangle=(top_left, (top_left[0], top_left[1] + h), (top_left[0] + w, top_left[1]),
(top_left[0] + w, top_left[1] + h)),
confidence=round(max_val, 2)
))
if maxcnt and len(result) >= maxcnt:
break
cv2.floodFill(res, None, max_loc, (-1000,), max_val - threshold + 0.1, 1, flags=cv2.FLOODFILL_FIXED_RANGE)
return result
if __name__ == '__main__':
# 读取图像和模板
# old_time=time.time()
# image = cv2.imdecode(np.fromfile(file=r"大图.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED) # 加载大图
# template = cv2.imdecode(np.fromfile(file=r"小图.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED) # 加载透明图
# mask = template[:, :, 3] # 提取透明度通道作为掩码
# result,max_similarity, best_angle = pyramid_template_matching(image, template, mask=mask)
# print(f"相似度:{max_similarity} 角度:{best_angle} 用时:{int((time.time()-old_time)*1000)}ms")
#
# # 显示结果图像
# cv2.imshow("Result", result)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
old_time=time.time()
image = cv2.imdecode(np.fromfile(file=r"D:\QQ聊天记录\111.bmp", dtype=np.uint8), cv2.IMREAD_COLOR) # 加载大图
template = cv2.imdecode(np.fromfile(file=r"D:\QQ聊天记录\p.png", dtype=np.uint8), cv2.IMREAD_COLOR) # 加载模板图(三通道)
RET = template_matching(image, template, mask=None)
for result in RET:
print(result)
# 转换为 NumPy 数组
pts = np.array((result['rectangle'][0], result['rectangle'][1], result['rectangle'][3],
result['rectangle'][2]), np.int32)
cv2.polylines(image, [pts], True, (0, 255, 0), 5)
# 显示结果图像
cv2.imshow("324234", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
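get_view 需要传入以小地图中心为中心、边长约 2*radius 的 BGRA 截图(靠 Alpha 通道提取视野扇形)。下面把 screenshot 和 get_view 串起来做一次调用示意,小地图区域坐标沿用项目里的 (87-10, 45-10, 246+10, 204+10),这是 1920*1080 分辨率下的取值,换分辨率需要自行调整,示例也省略了对截图失败的判断:
import win32gui
from screenshot import screenshot
from anglerecognition import get_view

# 1920*1080 下小地图所在区域(左、上、右、下),取值沿用项目设置
left, top, right, bottom = 87 - 10, 45 - 10, 246 + 10, 204 + 10
hwnd = win32gui.FindWindow("UnityWndClass", "原神")
small_img = screenshot(hwnd, left, top, right, bottom, None)
h, w = small_img.shape[:2]
radius = 70
# 以小地图中心为中心裁出 2*radius 的方块,识别玩家视野朝向(正北为 0 度)
angle = get_view(small_img[h // 2 - radius:h // 2 + radius,
                           w // 2 - radius:w // 2 + radius], radius)
print("当前视野角度:", angle)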
4、main.py 导入main库
vi main.py
from auto import main
5、全量代码(main.py)
import ctypes
import os
import subprocess
import sys
import time
import traceback
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QApplication
import state
from auto import main
from logger_module import logger
from jianchaupdate import get_var_data, get_updatetxt
from lanrentools.moni import find_child_window_handle, screen_width
if __name__ == '__main__':
try:
if ctypes.windll.shell32.IsUserAnAdmin():
hwnd_cmd = find_child_window_handle(0, "ConsoleWindowClass", "YuanShenAi")
if hwnd_cmd == 0:
hwnd_cmd = find_child_window_handle(0, "ConsoleWindowClass", "懒人原神")
vardata = get_var_data()
if vardata != False:
updatetxt = get_updatetxt()
print(updatetxt)
print("当前版本号:" + state.版本号)
if float(vardata[1]) > float(state.版本号):
print(
"如果更新失败手动下更新包手动覆盖: https://gitee.com/LanRenZhiNeng/lanren-genshin-impact-ai/releases")
var = input(f"发现更新的版本 v{vardata[1]} 是否需要更新? (输入yes or no 默认更新)\n")
if var == "":
var = "yes"
if var.lower() != "no":
update_process = subprocess.Popen(f'update.exe "{vardata[0]}.exe" "{vardata[2]}"',
creationflags=subprocess.CREATE_NEW_CONSOLE)
time.sleep(1)
sys.exit()
# 运行需要管理员权限的代码
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), 128)
os.environ['PNG_IGNORE_WARNINGS'] = '1'
# 在事件循环中创建一个任务对象并加入事件循环
aty = main() # 启动yolov服务
# ysam = YunShenAutoMap() # 启动寻路类
# app = QApplication(sys.argv) # 初始化Qt应用
# ratio = screen_width / 2560 # 分辨率比例
# # 设置全局字体大小
# # 计算字体大小
# base_font_size = 13
# # 基准字体大小,适合1920*1080分辨率
# new_font_size = int(base_font_size * ratio)
# font = QFont("Arial", new_font_size)
#
# # 子控件的宽度
# item_width=320
# item_height=240
# item_height_min = 60
# window_main = MainWindow()
# window_main.show()
# sys.exit(app.exec_()) # 监听消息不关闭
else:
# 如果不是管理员,则请求以管理员权限重新运行程序
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, " ".join(sys.argv), None, 1)
except Exception as err:
if str(err).find("sys.exit")==-1:
logger.error(traceback.format_exc())
6、创建素材目录
素材在项目中已有,直接把项目里的 datas 目录拷贝到运行目录即可。
7、初步运行 main.py
当前版本号:1.78
如果更新失败手动下更新包手动覆盖: https://gitee.com/LanRenZhiNeng/lanren-genshin-impact-ai/releases
发现更新的版本 v1.83 是否需要更新? (输入yes or no 默认更新)
no
2024-09-14 10:35:08.4535995 [E:onnxruntime:Default, provider_bridge_ort.cc:1744 onnxruntime::TryGetProviderInfo_CUDA] C:\a\_work\1\s\onnxruntime\core\session\provider_bridge_ort.cc:1426 onnxruntime::ProviderLibrary::Get [ONNXRuntimeError] : 1 : FAIL : LoadLibrary failed with error 126 "" when trying to load "C:\Users\Administrator\PycharmProjects\yolo8test\.venv\lib\site-packages\onnxruntime\capi\onnxruntime_providers_cuda.dll"
2024-09-14 10:35:08.4542652 [W:onnxruntime:Default, onnxruntime_pybind_state.cc:870 onnxruntime::python::CreateExecutionProviderInstance] Failed to create CUDAExecutionProvider. Please reference https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements to ensure all dependencies are met.
执行提供者: ['CPUExecutionProvider']
GPU加速: False
10:35:08,769 - 懒人原神AI - INFO - 躺宝连招插件服务已经启动!
检测帧率: 14
libpng warning: iCCP: known incorrect sRGB profile
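上面的日志说明 onnxruntime 加载 CUDA provider 失败(找不到 onnxruntime_providers_cuda.dll),于是回退到 CPU 推理,帧率只有 14。可以先用下面的小片段单独确认当前环境能用到哪些执行提供者(仅作排查示意,能否启用 CUDA 取决于是否安装了 onnxruntime-gpu 以及 CUDA/cuDNN 相关依赖):
import onnxruntime as ort

# 列出当前环境可用的执行提供者;若列表里没有 CUDAExecutionProvider,
# 说明 GPU 推理相关依赖没装好,只能走 CPUExecutionProvider
print(ort.get_available_providers())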
四、寻路
vi main.py
# 在事件循环中创建一个任务对象并加入事件循环
aty = main() # 启动yolov服务
#添加依赖调用
from wayfinding import *
ysam = YunShenAutoMap() # 启动寻路类
1、添加寻路
vi wayfinding.py
# -*- coding: utf-8 -*-
#opencv 必须 4.8.1.78
from pynput import keyboard
from pynput.keyboard import Key
import auto
from anglerecognition import pyramid_template_matching, get_view
from screenshot import *
from findimg import *
from lanrentools.hookkeymose import HookKeyMose
from auto import *
def setresize():
pyautogui.keyUp('w')
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
class YunShenAutoMap(HookKeyMose):
def __init__(self):
HookKeyMose.__init__(self, hookKey=True)
pyautogui.PAUSE = 0.01
pyautogui.FAILSAFE=False
self.on_rec=False
self.map_matrix = []
self.now_point=(0,0)#当前的位置
self.old_point=(0,0)#上一次的位置
self.isRunRec=False
self.小地图区域=(87-10,45-10,246+10,204+10 )
# 让yolov接手的兴趣点用来存可能出现怪物,或者需要捡东西的地方
self.interest_point = []
def run_rec(self, image_path,image_path_lujin):
'''
录制路径
:param image_path_map: 地图路径 有障碍物的
:param image_path: 原图
:return:
'''
self.on_rec = True
self.isRunRec=True
self.interest_point=[]
# 获取窗口句柄
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom =self.小地图区域 # 替换成你实际的区域坐标
roi = None
result_post=(0,0)
big_img = cv2.imdecode(np.fromfile(file=image_path, dtype=np.uint8), cv2.IMREAD_COLOR) # 加载大图
big_height, big_width, _ = big_img.shape
big_img_yt = big_img.copy()
# height, width, _ = big_img.shape
# window_height=500
# window_width = int(window_height * width / height)
# sj=str(random.randint(1000,9999))
# # 创建窗口
# cv2.namedWindow('Genshin navigation'+sj, cv2.WINDOW_NORMAL)
# cv2.resizeWindow('Genshin navigation'+sj, window_width, window_height)
# cv2.setWindowProperty("Genshin navigation"+sj, cv2.WND_PROP_TOPMOST, 1)
logger.info("载入地图成功")
while True:
if self.get_window_handle_at_mouse_position() != hwnd:
logger.info("鼠标离开游戏了!")
time.sleep(0.5)
continue
# #time.sleep(interval)
if state.开关_是否展预测结果:
#cv2.waitKey(100)
pass
if self.on_rec == False:
# 分割路径为目录和文件名
dir_path, filename = os.path.split(image_path)
# 分割文件名和后缀
name, extension = os.path.splitext(filename)
new_file=image_path_lujin
cv2.circle(big_img, result_post, 2, (0,0, 255), -1)
for point in self.interest_point:
cv2.circle(big_img, point, 2, (0, 255, 0), -1)
# 保存图像
with open(new_file, 'wb') as f:
f.write( cv2.imencode(extension, big_img)[1].tobytes())
logger.info("录制完成! 保存在了:"+new_file)
return
# big_img = big_img_bk.copy()
small_img = screenshot(hwnd, left, top, right, bottom, None)
ret = findImgALL(big_img_yt, small_img,roi)
try:
for result in ret:
result_post = [result["result"][0], result["result"][1]]
sm_width,sm_height= result["rectangle"][2][0]-result["rectangle"][0][0],result["rectangle"][2][1]-result["rectangle"][0][1]
cv2.circle(big_img, result_post, 2, (255, 0, 0), -1)
roi = [result["rectangle"][0][0]-40 ,result["rectangle"][0][1]-40, sm_width+80 , sm_height+80 ]
if roi[0] < 0:
roi[0] = 0
if roi[1] < 0:
roi[1] = 0
if roi[2] > big_width:
roi[0] = big_width - roi[2]
if roi[3] > big_height:
roi[1] = big_height - roi[3]
self.now_point = result_post
# 显示匹配结果
break
except :
logger.error(traceback.format_exc())
roi=None
if state.开关_是否展预测结果:
if big_img.shape[2] == 4:
state.图片_找图 = cv2.cvtColor(big_img, cv2.COLOR_BGRA2BGR)
else:
state.图片_找图 = big_img
state.QT_信号.mysig_show_xunlu.emit()
#cv2.imshow('Genshin navigation'+sj, big_img)
if state.开关_是否展预测结果:
pass
#cv2.destroyAllWindows()
return True
def run_chuansong(self, image_path,maodian_path=""):
'''
传送
:param image_path_map: 地图路径 有障碍物的
:param image_path: 原图
:return:
'''
# 获取窗口句柄
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom =0,0,1920,1080 # 替换成你实际的区域坐标
p_left = rect[0] + w_p+left
p_top = rect[1] + h_p+top
roi = None
moban_img = cv2.imdecode(np.fromfile(file=image_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
if maodian_path!="":
moban_maodian_img = cv2.imdecode(np.fromfile(file=maodian_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
else:
moban_maodian_img=None
chuangsong_img=cv2.imdecode(np.fromfile(file="./datas/传送按钮.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
def ddd():
paimeng_img = cv2.imdecode(np.fromfile(file="./datas/派蒙.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED)
for _ in range(20):
time.sleep(1)
big_img = screenshot(hwnd, left, top, right, bottom, None)
res = template_matching(big_img, paimeng_img, mask=None)
if state.状态_循环开关 == False:
logger.info("强制退出!")
state.状态_需重新传送 = False
return False
for result in res:
result_post = [result["result"][0], result["result"][1]]
logger.info("传送完成!")
# 转换为 NumPy 数组
pts = np.array(
(
result['rectangle'][0], result['rectangle'][1], result['rectangle'][3], result['rectangle'][2]),
np.int32)
# 将数组从行向量转换为列向量
# pts = pts.reshape((-1, 1, 2))
if state.开关_是否展预测结果:
cv2.circle(big_img, result_post, 10, (0, 0, 255), -1)
cv2.polylines(big_img, [pts], True, (0, 255, 0), 5)
# cv2.imshow('Genshin navigation' , big_img)
if big_img.shape[2] == 4:
state.图片_找图 = cv2.cvtColor(big_img, cv2.COLOR_BGRA2BGR)
else:
state.图片_找图 = big_img
state.QT_信号.mysig_show_xunlu.emit()
state.状态_传送中 = False
state.状态_需重新传送 = False
return True
state.状态_需重新传送 = True
logger.info("需要重新传送!")
return False
big_img = screenshot(hwnd, left, top, right, bottom, None)
res = findImgALL(big_img, moban_img,roi)
time.sleep(1)
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
set_window_activate(hwnd)
time.sleep(0.2)
if state.状态_循环开关== False:
logger.info("强制退出!")
state.状态_需重新传送 = False
return False
try:
if len(res) == 0:
logger.info(f"没定位到传送模板")
state.状态_需重新传送 = True
return False
for result in res:
logger.info("找到了传送位置")
result_post = [result["result"][0], result["result"][1]]
if state.开关_是否展预测结果:
cv2.circle(big_img, result_post, 10, (0, 0, 255), -1)
cv2.polylines(big_img, result['dst_pot'], True, (0, 255, 0), 5)
#cv2.imshow('Genshin navigation' , big_img)
#cv2.imwrite("./output.jpg", big_img)
if big_img.shape[2] == 4:
state.图片_找图 = cv2.cvtColor(big_img, cv2.COLOR_BGRA2BGR)
else:
state.图片_找图 = big_img
state.QT_信号.mysig_show_xunlu.emit()
#cv2.waitKey(100)
# 显示匹配结果
mouse_move(p_left +result_post[0], p_top + result_post[1])
time.sleep(0.1)
mouse_left_down()
time.sleep(0.1)
mouse_left_up()
time.sleep(0.5)
break
except Exception as err:
logger.info(f"没找到传送位置{err}")
state.状态_需重新传送 = True
return False
time.sleep(0.5)
if state.状态_循环开关 == False:
state.状态_需重新传送 = False
logger.info("强制退出!")
return False
big_img = screenshot(hwnd, left, top, right, bottom, None)
res=template_matching(big_img, chuangsong_img, mask=None)
ret = False
try:
for result in res:
result_post = [result["result"][0], result["result"][1]]
logger.info(f"找到了传送按钮1{result_post} {result['confidence']}")
# 转换为 NumPy 数组
pts = np.array((result['rectangle'][0], result['rectangle'][1], result['rectangle'][3], result['rectangle'][2]), np.int32)
# 将数组从行向量转换为列向量
#pts = pts.reshape((-1, 1, 2))
if state.开关_是否展预测结果:
cv2.circle(big_img, result_post, 10, (0, 0, 255), -1)
cv2.polylines(big_img, [pts], True, (0, 255, 0), 5)
# cv2.imshow('Genshin navigation' , big_img)
#cv2.imwrite("./output.jpg", big_img)
if big_img.shape[2] == 4:
state.图片_找图 = cv2.cvtColor(big_img, cv2.COLOR_BGRA2BGR)
else:
state.图片_找图 = big_img
state.QT_信号.mysig_show_xunlu.emit()
# cv2.waitKey(100)
# 显示匹配结果
mouse_move(p_left + result_post[0]+100, p_top + result_post[1])
time.sleep(0.1)
mouse_left_down()
time.sleep(0.1)
mouse_left_up()
time.sleep(0.5)
return ddd()
except Exception as err:
logger.info(f"没有找到传送按钮{err}")
state.状态_需重新传送 = True
return False
if ret != True:
time.sleep(0.5)
if state.状态_循环开关== False:
state.状态_需重新传送 = False
logger.info("强制退出!")
return False
big_img = screenshot(hwnd, left, top, right, bottom, None)
res = findImgALL(big_img, moban_maodian_img, None)
ret = False
try:
if res == []:
logger.info("传送失败")
state.状态_需重新传送 = True
return False
for result in res:
result_post = [result["result"][0], result["result"][1]]
if result["result"][0]<581 and result["result"][1]<401:
ret = False
continue
if result['confidence']<0.8:
ret = False
continue
logger.info(f"找到了锚点选项 {result_post} {result['confidence']}")
if state.开关_是否展预测结果:
cv2.circle(big_img, result_post, 10, (0, 0, 255), -1)
cv2.polylines(big_img, result['dst_pot'], True, (0, 255, 0), 5)
# cv2.imshow('Genshin navigation' , big_img)
#cv2.imwrite("./output.jpg", big_img)
if big_img.shape[2] == 4:
state.图片_找图 = cv2.cvtColor(big_img, cv2.COLOR_BGRA2BGR)
else:
state.图片_找图 = big_img
state.QT_信号.mysig_show_xunlu.emit()
# cv2.waitKey(100)
# 显示匹配结果
mouse_move(p_left + result_post[0], p_top + result_post[1])
time.sleep(0.1)
mouse_left_down()
time.sleep(0.1)
mouse_left_up()
time.sleep(0.5)
ret = True
break
except Exception as err:
logger.info(f"没找到锚点选项{err}" )
state.状态_需重新传送 = True
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
time.sleep(1)
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
return False
if ret == True:
big_img = screenshot(hwnd, left, top, right, bottom, None)
res = template_matching(big_img, chuangsong_img, mask=None)
if state.状态_循环开关 == False:
logger.info("强制退出!")
state.状态_需重新传送 = False
return False
try:
if res==[]:
logger.info("传送失败")
state.状态_需重新传送 = True
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
time.sleep(1)
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
return False
for result in res:
result_post = [result["result"][0], result["result"][1]]
logger.info(f"找到了传送按钮2{result_post} {result['confidence']}" )
# 转换为 NumPy 数组
pts = np.array((result['rectangle'][0], result['rectangle'][1], result['rectangle'][3],
result['rectangle'][2]), np.int32)
# 将数组从行向量转换为列向量
# pts = pts.reshape((-1, 1, 2))
if state.开关_是否展预测结果:
cv2.circle(big_img, result_post, 10, (0, 0, 255), -1)
cv2.polylines(big_img, [pts], True, (0, 255, 0), 5)
# cv2.imshow('Genshin navigation' , big_img)
#cv2.imwrite("./output.jpg", big_img)
if big_img.shape[2] == 4:
state.图片_找图 = cv2.cvtColor(big_img, cv2.COLOR_BGRA2BGR)
else:
state.图片_找图 = big_img
state.QT_信号.mysig_show_xunlu.emit()
# cv2.waitKey(100)
# 显示匹配结果
mouse_move(p_left + result_post[0]+100, p_top + result_post[1])
time.sleep(0.1)
mouse_left_down()
time.sleep(0.1)
mouse_left_up()
time.sleep(0.5)
return ddd()
except Exception as err:
logger.info(f"传送失败{err}" )
state.状态_需重新传送 = True
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
time.sleep(1)
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
return False
else:
logger.info("传送失败")
state.状态_需重新传送 = True
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
time.sleep(1)
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
return False
#cv2.destroyAllWindows()
return True
def run_playback(self, image_path_map, image_path,wakuang=False,caiji=False,quickening_keys=None):
'''
执行寻路 直到移动到目标为止
:param image_path_map: 地图路径 有障碍物的
:param image_path: 原图
:param isShow: 是否显示地图
:param quickening_keys: 用来随机赶路的按钮 比如
[{"probability":0.1,"keys":[ {"key":"1","time":0.2 }, {"key":"e","time":0.5}]}]
代表概率10%(100毫秒一次判断) 按1 0.2秒 然后 按E 1秒 支持无数组合
:return:
'''
self.isRunRec = False
state.状态_循环开关= True
state.状态_已经有寻路了=True
self.load_map(image_path_map)
self.map_matrix_copy = self.map_matrix.copy()
def setangle(angle_now, angle_target, fault=6):
nonlocal best_angle,quickening_keys
ret=True
angle_diff = abs(angle_now - angle_target)
if angle_diff <= fault:
ret= False
if state.状态_循环开关== False:
logger.info("强制退出1!")
pyautogui.keyUp('w')
ret= False
if ret==False:
return False
angle_diff = int((angle_target - angle_now) % 360)
if angle_diff > 180:
fangxiang = -1
angle_diff = 360 - angle_diff
else:
fangxiang = 1
if abs(angle_diff) < fault:
return True
#print(f"调整视角 相差:{fangxiang * angle_diff}° 当前:{angle_now}° 目标:{angle_target}° ")
mouse_moveR(int(fangxiang * 5 * angle_diff), 0)
time.sleep(0.05)
return True
state.计次_定位失败=0
# 获取窗口句柄
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
while True:
time.sleep(1)
if self.get_window_handle_at_mouse_position() == hwnd:
break
if state.状态_循环开关== False:
logger.info("强制退出!")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_需重新传送 = False
return False
# 设定保存截图的文件夹路径和文件名前缀
#folder_path = "./datas/img/"
# 设定定时器间隔(秒)
roi=None
big_img_bk = cv2.imdecode(np.fromfile(file=image_path, dtype=np.uint8),cv2.IMREAD_UNCHANGED) # 加载大图
big_height, big_width, big_td = big_img_bk.shape
if big_td==3:
# # 将24位彩色图片转换为32位彩色图片
big_img_bk = cv2.cvtColor(big_img_bk, cv2.COLOR_BGR2BGRA)#BGRA COLOR_BGR2BGRA
template = cv2.imdecode(np.fromfile(file=r"./datas/小图.png", dtype=np.uint8), cv2.IMREAD_UNCHANGED) # 加载透明图
mask = template[:, :, 3] # 提取透明度通道作为掩码
paimeng_img = cv2.imdecode(np.fromfile(file="./datas/派蒙.png", dtype=np.uint8),
cv2.IMREAD_UNCHANGED)
result_old = {"result": [0, 0]}
state.计数_卡主次数=0
state.状态_在爬墙=False
interest_coordinate2=None
interest_coordinate=None
state.游戏_打怪前坐标 = [0, 0]
state.游戏_当前目标坐标 = [0, 0]
state.计次_误差过大=0
self.old_point= [0, 0]
state.计次_识别次数=0
state.计次_移动停滞 =0
while True:
#old_time = time.time()
try:
time.sleep(0.01)
if state.状态_循环开关 == False:
logger.info("强制退出!")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_需重新传送 = False
return False
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
left, top, right, bottom = self.小地图区域 # 替换成你实际的区域坐标
if state.状态_全局暂停:
time.sleep(1)
continue
if not state.状态_YOLOV :
time.sleep(1)
continue
if state.计次_误差过大 == 20:
self.old_point = copy.copy(state.游戏_当前目标坐标)
if state.计次_误差过大>= 30:
logger.info("误差过大了30次,重新运行这个任务")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_需重新传送 = True
return False
if state.计次_移动停滞>=20:
logger.info("移动停滞了20次,重新运行这个任务")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_需重新传送 = True
return False
if state.计次_定位失败>=30:
logger.info("定位失败次数超过30次,重新运行这个任务")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_需重新传送 = True
return False
self.quickening(quickening_keys)
big_img = big_img_bk.copy()
#filename = f"{folder_path}{int(time.time())}.png"
height = bottom - top
width = right - left
small_img = screenshot(hwnd, left, top, right, bottom, filename=None)#RGBA
if type(small_img)==bool:
hwnd = win32gui.FindWindow("UnityWndClass", state.GAME_TITLE) # 替换成你实际的窗口句柄
time.sleep(1)
continue
# 识别位置和方向
_, max_similarity, best_angle, best_rotated_template = pyramid_template_matching(
small_img[int(height / 2) - 30:int(height / 2) + 30, int(width / 2) - 30:int(width / 2) + 30],
template,
mask=mask)
best_angle = int(best_angle)
#cv2.imwrite(filename,small_img)
#cv2.imwrite(f"{folder_path}{int(time.time())}_111.png",small_img[int(height / 2) - 30:int(height / 2) + 30, int(width / 2) - 30:int(width / 2) + 30])
# 识别位置和方向
state.游戏_当前视野角度= get_view(small_img[int(height / 2) - 70:int(height / 2) + 70, int(width / 2) - 70:int(width / 2) + 70],70)
if state.游戏_当前视野角度==-1:
state.游戏_当前视野角度=best_angle
state.游戏_当前导航角度=best_angle
#print(state.游戏_当前视野角度,state.游戏_当前导航角度)
# 裁剪图像,去除Alpha通道
# small_img=small_img[:, :, :3]
ret = findImgALL(big_img, small_img,roi)
if ret==None or ret == []:
logger.info(f"定位失败{state.计次_定位失败}" )
state.计数_卡主次数 = 0
state.计次_定位失败 += 1
if state.计次_定位失败>=15:
pyautogui.keyDown('shift')
time.sleep(0.3)
pyautogui.keyUp('shift')
# 如果定位失败,判断派蒙是否存在
#如果派蒙不在 并且不是在连招过程中,则说明现在 在不明界面中,尝试按ESC键和点击指定位置跳过对话
res__ = template_matching(screenshot(hwnd, 0, 0, 1920, 1080, filename=None), paimeng_img, mask=None)
if res__==[] and state.状态_是否回放中==False:
logger.info("发现不明界面,尝试按ESC键和点击指定位置跳过对话")
rect = win32gui.GetWindowRect(hwnd)
w_p, h_p = (rect[2] - rect[0]) - 1920, (rect[3] - rect[1]) - 1080
# 设定截图区域的左上角坐标 (x, y) 和右下角坐标 (x, y)
p_left = rect[0] + w_p
p_top = rect[1] + h_p
mouse_move(p_left+1400,p_top+584)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.2)
mouse_move(p_left + 1412, p_top + 652)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.2)
mouse_move(p_left + 1419, p_top + 729)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
time.sleep(0.2)
mouse_move(p_left + 1380, p_top + 801)
time.sleep(0.2)
mouse_left_down()
time.sleep(0.2)
mouse_left_up()
if random.randint(0,2)==1:
time.sleep(0.2)
pyautogui.keyDown("esc")
time.sleep(0.2)
pyautogui.keyUp("esc")
if state.状态_寻路中:
if roi==None:
if result_old!={"result": [0, 0]}:
ret = [result_old]
else:
roi = None
continue
else:
roi = None
continue
roi=None
for result in ret:
if result["result"] !=result_old["result"] :
state.计次_定位失败 = 0
sm_width,sm_height= result["rectangle"][2][0]-result["rectangle"][0][0],result["rectangle"][2][1]-result["rectangle"][0][1]
state.游戏_当前坐标 = [result["result"][0], result["result"][1]]
roi = [result["rectangle"][0][0]-40 ,result["rectangle"][0][1]-40, sm_width+80 , sm_height+80 ]
if roi[0]<0:
roi[0]=0
if roi[1]<0:
roi[1]=0
if roi[2]>big_width:
roi[0]=big_width-roi[2]
if roi[3] > big_height:
roi[1] = big_height - roi[3]
self.update_map_show(big_img)
if state.状态_寻路中:
if state.游戏_当前坐标[0] < 0 or state.游戏_当前坐标[1] < 0:
roi = None
break
if abs(state.游戏_当前坐标[0] - self.end[0]) <= 120 and abs(
state.游戏_当前坐标[1] - self.end[1]) <= 120 and quickening_keys != None:
# 快到终点了,ban掉这些赶路的,防止走错地方
quickening_keys = None
if abs(state.游戏_当前坐标[0] - self.end[0]) <= 4 and abs(state.游戏_当前坐标[1] - self.end[1]) <= 4:
logger.info("已经到达目的地")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_需重新传送 = False
return True
state.计次_识别次数+=1
if state.计次_识别次数>=8:
if state.状态_是否回放中==False and state.状态_寻路中:
if abs(state.游戏_当前坐标[0] - result_old["result"][0]) <=3 and abs(
state.游戏_当前坐标[1] - result_old["result"][1]) <= 3 :
state.计次_移动停滞 += 1
state.计数_卡主次数+=1
if state.计数_卡主次数 >= 10:
if abs(state.游戏_当前坐标[0] - self.end[0]) <= 20 and abs(
state.游戏_当前坐标[1] - self.end[1]) <= 20:
logger.info("非常接近目的地了!但是遇到意外,因此跳过这个任务!")
pyautogui.keyUp('w')
state.状态_已经有寻路了 = False
state.状态_需重新传送 = False
return True
if random.randint(0, 1):
pyautogui.keyDown('a')
aaa = -90
else:
pyautogui.keyDown('d')
aaa = 90
pyautogui.keyUp('w')
mouse_moveR(6 * aaa, 0)
state.计数_卡主次数 = 0
logger.info("卡主太多次,尝试按X")
pyautogui.keyDown('x')
time.sleep(0.2)
pyautogui.keyUp('x')
time.sleep(1)
pyautogui.keyUp('d')
pyautogui.keyUp('a')
pyautogui.keyDown('w')
pyautogui.keyDown('x')
time.sleep(0.2)
pyautogui.keyUp('x')
time.sleep(5)
pyautogui.keyUp('w')
else:
logger.info("尝试脱困")
pyautogui.keyDown('w')
pyautogui.keyDown('space')
time.sleep(0.2)
pyautogui.keyUp('space')
rrrr = random.randint(1, 20)
if rrrr == 3:
pyautogui.keyDown('a')
time.sleep(1)
pyautogui.keyUp('a')
elif rrrr == 1:
pyautogui.keyDown('d')
time.sleep(1)
pyautogui.keyUp('d')
elif rrrr == 2:
pyautogui.keyUp('w')
pyautogui.keyDown('d')
pyautogui.keyDown('s')
time.sleep(1)
pyautogui.keyUp('s')
pyautogui.keyUp('d')
time.sleep(2)
pyautogui.keyDown('w')
elif rrrr == 4:
pyautogui.keyUp('w')
pyautogui.keyDown('a')
pyautogui.keyDown('s')
time.sleep(1)
pyautogui.keyUp('s')
pyautogui.keyUp('a')
time.sleep(2)
pyautogui.keyDown('w')
time.sleep(0.2)
pyautogui.keyUp('w')
else:
state.计次_移动停滞 -=1
if state.计次_移动停滞 <= 0:
state.计次_移动停滞=0
state.计数_卡主次数=0
if state.计次_识别次数 >= 8:
result_old = result.copy()
state.计次_识别次数=0
if state.开关_是否展预测结果:
#cv2.polylines(big_img, result['dst_pot'], True, (0, 255, 0), 5)
# 获取小图的宽度和高度
small_height, small_width, small_channels = best_rotated_template.shape
best_rotated_template = cv2.resize(best_rotated_template,
(int(small_width * 2), int(small_height * 2)))
small_height, small_width, small_channels = best_rotated_template.shape
# 计算放置小图像的左上角坐标,使其居中在result位置
x = state.游戏_当前坐标[0] - int(small_width / 2)
y = state.游戏_当前坐标[1] - int(small_height / 2)
# 将小图叠加到大图上
for c in range(small_channels):
big_img[y:y + small_height, x:x + small_width, c] = \
best_rotated_template[:, :, c] * (best_rotated_template[:, :, 3] / 255.0) + \
big_img[y:y + small_height, x:x + small_width, c] * (
1.0 - best_rotated_template[:, :, 3] / 255.0)
self.draw_arrow( big_img, state.游戏_当前坐标[0] , state.游戏_当前坐标[1], length=200, angle=state.游戏_当前视野角度)
if state.状态_寻路中:
#如果刚刚打完怪.需要回到原位
if state.游戏_打怪前坐标!=[0,0]:
state.游戏_当前目标坐标= copy.copy(state.游戏_打怪前坐标)
try:
if abs(state.游戏_当前坐标[0] - state.游戏_打怪前坐标[0]) <= 55 and abs(state.游戏_当前坐标[1] - state.游戏_打怪前坐标[1]) <= 55:
state.游戏_打怪前坐标 = [0, 0]
logger.info("回到之前的位置了")
continue
except:
state.游戏_打怪前坐标 = [0, 0]
logger.info("回到之前的位置了")
continue
else:
state.游戏_当前目标坐标,interest_coordinate = self.find_nearest_coordinate(self.map_matrix_copy, state.游戏_当前坐标,50,distance_min=8)
if state.游戏_当前目标坐标 == None:
state.游戏_当前目标坐标,interest_coordinate = self.find_nearest_coordinate(self.map_matrix_copy,state.游戏_当前坐标, None, distance_min=8)
if state.游戏_当前目标坐标 == None:
state.游戏_当前目标坐标 = copy.copy(self.old_point)
if self.old_point!=[0,0] and state.游戏_当前目标坐标!=[0,0]:
if abs(state.游戏_当前坐标[0]-self.old_point[0])>=70 or abs(state.游戏_当前坐标[1]-self.old_point[1])>=70:
state.游戏_当前坐标=copy.copy(self.old_point )
logger.info("定位误差过大!!")
state.计次_误差过大+=1
else:
state.计次_误差过大=0
if state.计次_误差过大==0:
self.old_point = copy.copy(state.游戏_当前坐标)
if interest_coordinate!=None:
interest_coordinate2=copy.copy(interest_coordinate)
if interest_coordinate2!=None:
if abs(state.游戏_当前坐标[0] - interest_coordinate2[0]) <= 5 and abs(state.游戏_当前坐标[1] - interest_coordinate2[1]) <= 5:
logger.info("附近有兴趣点!")
state.计时_未寻路 = int(time.time())
state.状态_寻路中 = False
state.游戏_打怪前坐标 = copy.copy(state.游戏_当前目标坐标)
state.计数_没找到任何目标 = 0
state.计数_没找到怪物 = 0
pyautogui.keyUp('w')
state.计数_卡主次数 = 0
state.计次_识别次数 = 0
mouse_left_down()
mouse_left_up()
if wakuang or caiji:
logger.info("在连招中....")
pyautogui.keyDown('x')
time.sleep(0.2)
pyautogui.keyUp('x')
state.状态_是否回放中 = True
asyncio.run_coroutine_threadsafe(send_to_client(-1, "脚本执行#@@#2"),
auto.loop)
time.sleep(1)
self.remove_coordinates_in_range_map(self.map_matrix_copy, interest_coordinate2, 5,state.游戏_当前目标坐标)
if self.map_matrix_copy[interest_coordinate2[0]][interest_coordinate2[1]]==-1:
interest_coordinate=None
interest_coordinate2=None
self.map_matrix_copy = self.remove_coordinates_in_range_map(self.map_matrix_copy,state.游戏_当前坐标,8, state.游戏_当前目标坐标)
if state.游戏_当前目标坐标 == None:
for _ in range(30):
if not state.状态_寻路中:
break
small_img = screenshot(hwnd, left, top, right, bottom, filename=None) # RGBA
# 识别位置和方向
state.游戏_当前视野角度 = get_view(small_img[int(height / 2) - 70:int(height / 2) + 70,
int(width / 2) - 70:int(width / 2) + 70], 70)
rrr=setangle(state.游戏_当前视野角度, self.get_next_angle(state.游戏_当前坐标, self.end))
if rrr==False:
break
else:
for _ in range(30):
if not state.状态_寻路中:
break
small_img = screenshot(hwnd, left, top, right, bottom, filename=None) # RGBA
# 识别位置和方向
state.游戏_当前视野角度 = get_view(small_img[int(height / 2) - 70:int(height / 2) + 70,
int(width / 2) - 70:int(width / 2) + 70], 70)
rrr=setangle(state.游戏_当前视野角度, self.get_next_angle(state.游戏_当前坐标, state.游戏_当前目标坐标))
if rrr==False:
break
pyautogui.keyDown('w')
if state.开关_是否展预测结果:
cv2.circle(big_img, state.游戏_当前目标坐标, 10, (255, 24, 255), -1)
if big_img.shape[2] == 4:
state.图片_找图 = cv2.cvtColor(big_img, cv2.COLOR_BGRA2BGR)
else:
state.图片_找图 = big_img
state.QT_信号.mysig_show_xunlu.emit()
# # 显示匹配结果
# cv2.imshow('Genshin navigation', big_img)
#print(f"识别时间:{time.time() - old_time}")
break
except :
time.sleep(0.3)
state.计数_卡主次数 = 0
state.计次_定位失败 += 1
if state.计次_定位失败 >= 15:
pyautogui.keyDown('shift')
time.sleep(0.3)
pyautogui.keyUp('shift')
roi = None
#traceback.print_exc()
def update_map_show(self,big_img):
for rdex, rowitem in enumerate(self.map_matrix_copy):
for cdex, column in enumerate(rowitem):
# 只判断符合范围内的 并且是 >1则是路线范围的坐标
if column > 0:
point=(rdex,cdex)
if column==1:
cv2.circle(big_img, point, 2, (255, 0, 0), -1)
elif column==2:
cv2.circle(big_img, point, 4, (0, 255, 0), -1)
elif column==3:
cv2.circle(big_img, point, 4, (0, 0, 255), -1)
def quickening(self,quickening_keys=None):
'''
:param quickening_keys: 用来随机赶路的按钮 比如 [{"probability":0.1,"keys":[ {"key":"1","time":0.2 }, {"key":"e","time":0.5}]}] 代表概率10% 按1 0.2秒 然后 按E 0.5秒 支持无数组合
:return:
'''
if quickening_keys==None:
return False
for quickening_key in quickening_keys:
if random.randint(1,100)<quickening_key["probability"]*100:
for key in quickening_key["keys"]:
if state.状态_循环开关== False:
logger.info("强制退出!")
pyautogui.keyUp('w')
return False
logger.info(f"触发赶路按键:{key['key']}")
pyautogui.keyDown(key["key"])
time.sleep(key["time"])
pyautogui.keyUp(key["key"])
time.sleep(0.1)
return True
def get_next_angle(self, start_pos, next_pos):
# 计算方向向量
direction_x = next_pos[0] - start_pos[0]
direction_y = next_pos[1] - start_pos[1]
# 使用反正切函数计算弧度角度值
angle_rad = math.atan2(direction_y, direction_x)
# 将弧度角度值转换为以北方向为0度的角度
angle_deg = math.degrees(angle_rad)
# 将角度值转换为顺时针方向
angle_deg = int((angle_deg + 360 + 90) % 360)
return angle_deg
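    # (Editor-added worked example, not project code) with start = (100, 100):
    #   next = (110, 100): atan2(0, 10)  = 0 deg   -> (0 + 450) % 360   = 90  (east, +x direction)
    #   next = (100, 90):  atan2(-10, 0) = -90 deg -> (-90 + 450) % 360 = 0   (north, -y direction)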
def find_nearest_coordinate(self,coordinates, my_position,range_limit=100,distance_min=0):
'''
寻找当前位置最近的坐标点 顺便返回最近的兴趣点
:param coordinates:
:param my_position:
:param range_limit: 搜索范围半径,用于限制搜索范围以提高效率;传 None 表示不限制
:return:
'''
min_distance_coordinate = float('inf')
nearest_coordinate =None
interest_coordinate=None
for rdex,rowitem in enumerate(coordinates) :
for cdex,column in enumerate(rowitem):
#只判断符合范围内的 并且是 >1则是路线范围的坐标
if column>0:
#x 附近 range_limit 范围的点 y 附近 range_limit 范围的点
if range_limit!=None:
if abs(my_position[0]-rdex)<=range_limit and abs(my_position[1]-cdex)<=range_limit :
#计算两个坐标点之间的距离 但是不要判断和自己一样的坐标.那没意义
if (my_position[0],my_position[1])!=(rdex,cdex):
distance = math.sqrt((my_position[0] - rdex) ** 2 + (my_position[1] - cdex) ** 2)
if distance>=distance_min:#要比最近小距离大.这样走位才不会老歪歪扭扭
if distance < min_distance_coordinate:
min_distance_coordinate = distance
if column==2:
interest_coordinate=[rdex,cdex]
nearest_coordinate = [rdex,cdex]
else:
# 计算两个坐标点之间的距离 但是不要判断和自己一样的坐标.那没意义
if (my_position[0], my_position[1]) != (rdex, cdex):
distance = math.sqrt((my_position[0] - rdex) ** 2 + (my_position[1] - cdex) ** 2)
if distance >= distance_min: # 要比最近小距离大.这样走位才不会老歪歪扭扭
if distance < min_distance_coordinate:
min_distance_coordinate = distance
if column == 2:
interest_coordinate = [rdex, cdex]
nearest_coordinate = [rdex, cdex]
return nearest_coordinate,interest_coordinate
def on_release(self, key: keyboard.KeyCode):
"""定义释放时候的响应"""
if key == Key.f9:#停止
state.状态_循环开关= False
state.状态_全局暂停=True
state.状态_已经有寻路了 = False
state.状态_是否禁止录制=True
self.on_rec = False
elif key == Key.f8:
self.on_rec = False
if state.状态_是否禁止录制:
if state.状态_循环开关== False:
return
state.状态_全局暂停=not state.状态_全局暂停
logger.info(f"state.状态_全局暂停{state.状态_全局暂停}")
if state.状态_全局暂停:
time.sleep(0.3)
pyautogui.keyUp('w')
elif key ==Key.f10:
if self.isRunRec:
self.interest_point.append(self.now_point)
logger.info(f"标记坐标{self.now_point}")
def load_map(self, image_path):
'''
加载地图 这个地图要障碍物地图
:param image_path:
:return:
'''
logger.info("加载地图中...")
# 读取图片
self.image = Image.open(image_path)
# 获取图片尺寸
self.width, self.height = self.image.size
# 定义地图
self.map_matrix = []
# 根据像素颜色生成地图数据
for x in range(self.width):
row=[]
for y in range(self.height):
pixel = self.image.getpixel((x, y))
if pixel[2] >= 240 and pixel[0] < 20 and pixel[1] < 20: # 蓝色
row.append(1)
elif pixel[0] >= 240 and pixel[1] < 20 and pixel[2] < 20: # 红色
self.end = (x, y)
row.append(3)
elif pixel[1] >= 240 and pixel[0] < 20 and pixel[2] < 20: # 绿色
row.append(2)
else:
row.append(0)
self.map_matrix.append(row)
logger.info("地图加载完毕!")
def get_window_handle_at_mouse_position(self, ):
active_hwnd = ctypes.windll.user32.GetForegroundWindow()
return active_hwnd
def remove_coordinates_in_range_map(self, coordinates, center, range_limit,nex_pos):
new_coordinates=coordinates.copy()
for rdex, rowitem in enumerate(new_coordinates):
for cdex, column in enumerate(rowitem):
# 只判断符合范围内的 并且是 >1则是路线范围的坐标
if column > 0:
if (rdex,cdex) == self.end: # 终点不能删除
continue
#将附近的点标记为走过了
elif abs(center[0]-rdex)<=range_limit and abs(center[1]-cdex)<=range_limit:
new_coordinates[rdex][cdex]=-1# 将该点标记为已经走过的,标记为-1
elif abs(center[0] - rdex) <= range_limit*2 and abs(center[1] - cdex) <= range_limit*2:
angle_target = self.get_next_angle(center, nex_pos) # 当前位置对准兴趣点的角度
angle_coordinate = self.get_next_angle(center, (rdex,cdex)) # 当前位置对准该坐标点(rdex,cdex)的角度
angle_diff = abs(angle_target - angle_coordinate)
if angle_diff > 160 and angle_diff<=220 :
new_coordinates[rdex][cdex]=-1# 将该点标记为已经走过的,标记为-1
return new_coordinates
def draw_arrow(self,img, x, y, length=50, angle=45):
# 箭头的长度和角度(以北方向为0度,顺时针为正方向)
# 将角度转换为以北方向为0度的角度
degrees = (450 - angle) % 360
# 计算箭头末端的坐标
end_x = int(x + length * np.cos(np.radians(degrees)))
end_y = int(y - length * np.sin(np.radians(degrees)))
# 在图像上画线段
cv2.line(img, (x, y), (end_x, end_y), (0, 0,255), 5)
# 画箭头头部
theta = np.radians(degrees)
arrow_length = 10
arrow_angle = np.radians(30)
# 箭头头部的两个端点
arrow_pt1 = (end_x - int(arrow_length * np.cos(theta + arrow_angle)),
end_y + int(arrow_length * np.sin(theta + arrow_angle)))
arrow_pt2 = (end_x - int(arrow_length * np.cos(theta - arrow_angle)),
end_y + int(arrow_length * np.sin(theta - arrow_angle)))
# 画箭头头部
cv2.line(img, (end_x, end_y), arrow_pt1, (0, 0,255), 5)
cv2.line(img, (end_x, end_y), arrow_pt2, (0, 0,255), 5)
return img
if __name__ == '__main__':
# 创建一个事件循环对象
loop=asyncio.new_event_loop()
# image_path = r"datas/max_map/璃月史莱姆.png"
# image_path_barrier = r"datas/max_map/璃月史莱姆_路径.png"
# image_path = r"datas/max_map/max_map.png"
# image_path_barrier = r"datas/max_map/max_map_路径.png"
# image_path = r"datas/max_map/风龙废墟.png"
# image_path_barrier = r"datas/max_map/风龙废墟_路径.png"
# image_path = r"datas/max_map/达达乌帕谷.png"
# image_path_barrier = r"datas/max_map/达达乌帕谷_路径.png"
# image_path = r"datas/max_map/名椎滩.png"
# image_path_barrier = r"datas/max_map/名椎滩_路径.png"
#
image_path_barrier = r"datas/max_map/瑶光滩_路径.png"
#
# # image_path = r"datas/max_map/孤云阁.png"
# # image_path_barrier = r"datas/max_map/孤云阁_路径.png"
#
# #在新线程中运行事件循环
# thread = threading.Thread(target=loop.run_forever)
# thread.setDaemon(True)
# thread.start()
# # 在事件循环中创建一个任务对象并加入事件循环
# task = loop.create_task(main(True))
#
#
# quickening_keys = [{"probability": 0.1, "keys": [{"key": "1", "time": 0.4}, {"key": "e", "time": 0.5}]},
# {"probability": 0.1, "keys": [{"key": "2", "time": 0.3}, {"key": "e", "time": 0.5}]},
# {"probability": 0.1,
# "keys": [{"key": "3", "time": 0.3}, {"key": "space", "time": 0.2}, {"key": "e", "time": 0.5},
# {"key": "e", "time": 0.5}]}
# ]
ysam = YunShenAutoMap()
chuansong_path = r"datas/Task/孤云阁/传送模板.png"
maodian_path = r"datas/Task/孤云阁/锚点模板.png"
ysam.run_chuansong(chuansong_path,maodian_path)
#ysam.run_playback(image_path_barrier, image_path, quickening_keys=None,isShow=state.开关_是否显示地图)
#ysam.run_rec(image_path)
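Before moving on to the next module, here is a minimal, editor-added sketch (not project code; the matrix values are invented for illustration) of the waypoint bookkeeping the navigation loop above relies on: the map matrix encodes path points as 1, interest points as 2, the end point as 3 and already-visited points as -1; each cycle the nearest unvisited point becomes the target (as in find_nearest_coordinate) and points around the current position are marked as visited (as in remove_coordinates_in_range_map).
# Editor-added illustration, not project code: toy versions of the waypoint helpers.
import math

# 0 = empty, 1 = path (blue), 2 = interest point (green), 3 = end point (red), -1 = already visited
map_matrix = [
    [0, 1, 1, 2, 0],
    [0, 1, 0, 1, 0],
    [0, 3, 1, 1, 0],
]

def find_nearest(matrix, my_pos, distance_min=0):
    """Nearest unvisited waypoint (value > 0) at least distance_min away, like find_nearest_coordinate."""
    best, best_d = None, float("inf")
    for x, row in enumerate(matrix):
        for y, v in enumerate(row):
            if v > 0 and (x, y) != tuple(my_pos):
                d = math.hypot(my_pos[0] - x, my_pos[1] - y)
                if distance_min <= d < best_d:
                    best, best_d = [x, y], d
    return best

def mark_visited(matrix, center, radius):
    """Mark everything near the current position as walked (-1), like remove_coordinates_in_range_map."""
    for x, row in enumerate(matrix):
        for y, v in enumerate(row):
            if v > 0 and v != 3 and abs(center[0] - x) <= radius and abs(center[1] - y) <= radius:
                matrix[x][y] = -1   # the end point (3) is never removed
    return matrix

pos = [1, 2]
print(find_nearest(map_matrix, pos))   # -> [0, 2], the closest unvisited path point
mark_visited(map_matrix, pos, 1)
print(find_nearest(map_matrix, pos))   # points within radius 1 are now skipped; only the end point remains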
2、Locating a small image within a large image
vi findimg.py
# -*- coding: utf-8 -*-
import time
import cv2
import numpy as np
def bbbb(big_img,small_img,sim=0.7):
old_time=time.time()
g1=cv2.cvtColor(small_img,cv2.COLOR_BGR2GRAY)
g2=cv2.cvtColor(big_img,cv2.COLOR_BGR2GRAY)
sift=cv2.SIFT_create(nfeatures=0, nOctaveLayers=10)
kp1,des1=sift.detectAndCompute(g1,None)
kp2,des2=sift.detectAndCompute(g2,None)
index_params = dict(algorithm=1, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good_matches = []
for (m,n) in matches:
if m.distance < sim*n.distance:
good_matches.append(m)
print(good_matches)
if len(good_matches) < 4:
print("Not enough matching points")
return None
src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
#查找单应性矩阵
H,mask=cv2.findHomography(src_pts,dst_pts,cv2.RANSAC,5.0)
matches_mask = mask.ravel().tolist()
h,w=small_img.shape[:2]
pts=np.float32([[0,0],[0,h-1],[w-1,h-1],[w-1,0]]).reshape(-1,1,2)
dst=cv2.perspectiveTransform(pts,H)
pypts = []
for npt in dst.astype(int).tolist():
pypts.append(tuple(npt[0]))
lt, br = pypts[0], pypts[2]
middle_point = (lt[0] + br[0]) / 2, (lt[1] + br[1]) / 2 # 计算中心点
print(dst,time.time()-old_time,middle_point,min(1.0 * matches_mask.count(1) / 10, 1.0))
#画多边形
cv2.polylines(big_img, [np.int32(dst)], True, (0, 255, 0),2)
cv2.imshow("Matched Image", big_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def aaaa(big_img,small_img,sim=0.7):
# 检查small_img的尺寸
if small_img.shape[0] < 50 or small_img.shape[1] < 50:
print("small_img的尺寸太小")
return None
g1 = cv2.cvtColor(small_img, cv2.COLOR_BGR2GRAY)
g2 = cv2.cvtColor(big_img, cv2.COLOR_BGR2GRAY)
# 检查g1的尺寸
if g1.shape[0] <= 0 or g1.shape[1] <= 0:
print("g1的尺寸小于等于0")
return None
orb = cv2.ORB_create(nfeatures=5000, scaleFactor=3.0, nlevels=8)
kp2, des2 = orb.detectAndCompute(g2, None)
kp1, des1 = orb.detectAndCompute(g1, None)
index_params = dict(algorithm=6, table_number=6, key_size=12, multi_probe_level=2)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good_matches = []
for (m, n) in matches:
if m.distance < sim * n.distance:
good_matches.append(m)
if len(good_matches) < 4:
print("Not enough matching points")
return None
src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matches_mask = mask.ravel().tolist()
h, w = small_img.shape[:2]
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, H)
pypts = []
for npt in dst.astype(int).tolist():
pypts.append(tuple(npt[0]))
lt, br = pypts[0], pypts[2]
middle_point = (lt[0] + br[0]) / 2, (lt[1] + br[1]) / 2 # 计算中心点
print(middle_point)
cv2.polylines(big_img, [np.int32(dst)], True, (0, 255, 0), 2)
cv2.imshow("Matched Image", big_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def findImg(big_img,small_img,sim=0.7,min_match_count= 4,threshold=0.7):
'''
图片查找_特征匹配_最佳位置
:param big_img:
:param small_img:
:param sim:
:param min_match_count:
:param threshold:
:return: {"result": 中心点, "rectangle": 矩阵信息, "confidence": 0.76,'dst_pot': array(线条信息) 方便 cv2.polylines使用}
'''
try:
return findImgALL(big_img, small_img, maxcnt=1, min_match_count=min_match_count)[0]  # findImgALL takes no sim/threshold arguments; passing them made this call always raise and fall through to {}
except:
return {}
def findImgALL_(big_img, small_img,min_match_count=10, maxcnt=0):
'''
使用sift算法进行多个相同元素的查找
Args:
im_source(string): 图像、素材
im_search(string): 需要查找的图片
threshold: 阈值,当相识度小于该阈值的时候,就忽略掉
maxcnt: 限制匹配的数量
Returns:
A tuple of found [(point, rectangle), ...]
A tuple of found [{"result": point, "rectangle": rectangle, "confidence": 0.76}, ...]
rectangle is a 4 points list
'''
#sift = cv2.SIFT_create(nfeatures=0, nOctaveLayers=20)
#small_img = remove_background(small_img)
# 增加nfeatures的值:通过增加nfeatures参数的值,可以提取更多的特征点。默认情况下,该值为
# 0,表示提取所有的特征点。你可以逐渐增加这个值,看看是否能够得到更多细节丰富的特征点。
# 调整edgeThreshold参数:该参数用于控制边缘阈值,即排除边缘特征点的数量。通过减小edgeThreshold的值,
# 可以允许更多的边缘特征点被提取出来,从而增加特征的细节。
# 调整sigma参数:sigma 参数用于控制高斯滤波器的标准差,从而影响特征点的尺度范围。较小的sigma值可以提取到更细微的特征,
# 但也可能导致特征点w数量过多。你可以尝试增大或减小sigma的值,找到适合你任务的最佳取值。
sift = cv2.SIFT_create(sigma=1.3)
flann = cv2.FlannBasedMatcher({'algorithm': 1, 'trees': 5}, dict(checks=50))
g1 = cv2.cvtColor(small_img, cv2.COLOR_BGR2GRAY)
g2 = cv2.cvtColor(big_img, cv2.COLOR_BGR2GRAY)
kp_sch, des_sch = sift.detectAndCompute(g1, None)
if len(kp_sch) < min_match_count:
return None
kp_src, des_src = sift.detectAndCompute(g2, None)
if len(kp_src) < min_match_count:
return None
result = []
while True:
# 匹配两个图片中的特征点,k=2表示每个特征点取2个最匹配的点
matches = flann.knnMatch(des_sch, des_src, k=2)
good = []
for m, n in matches:
# 剔除掉跟第二匹配太接近的特征点
if m.distance < 0.75* n.distance:
good.append(m)
if len(good) < min_match_count:
break
#print(len(good))
sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
img_pts = np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
# M是转化矩阵
M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
matches_mask = mask.ravel().tolist()
# 计算四个角矩阵变换后的坐标,也就是在大图中的坐标
h, w = small_img.shape[:2]
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
try:
dst = cv2.perspectiveTransform(pts, M)
except:
break
# trans numpy arrary to python list
# [(a, b), (a1, b1), ...]
pypts = []
for npt in dst.astype(int).tolist():
pypts.append(tuple(npt[0]))
sim = min(1.0 * matches_mask.count(1) / 10, 1.0)
X = (pypts[0][0] + pypts[1][0] + pypts[2][0] + pypts[3][0]) // 4
Y = (pypts[0][1] + pypts[1][1] + pypts[2][1] + pypts[3][1]) // 4
middle_point = (X, Y)#计算中心点
result.append(dict(
result=middle_point,
rectangle=pypts,
confidence=sim,
dst_pot=[np.int32(dst)]
))
if maxcnt and len(result) >= maxcnt:
break
# 符合的特征点存起来 用以后删除
qindexes, tindexes = [], []
for m in good:
#将已经找到的先存起来 后面方便删除
qindexes.append(m.queryIdx)
tindexes.append(m.trainIdx)
def filter_index(indexes, arr):
r = []
for i, item in enumerate(arr):
if i not in indexes:
r .append(item)
return np.array(r )
#更新数组
kp_src = filter_index(qindexes,kp_src)
des_src = filter_index(tindexes,des_src)
return result
def findImgALL(big_img, small_img, roi=None, min_match_count=10, maxcnt=0):
'''
使用sift算法进行多个相同元素的查找
Args:
big_img(numpy.ndarray): 大图像
small_img(numpy.ndarray): 需要查找的小图像
roi(tuple): 感兴趣区域的坐标,格式为(x, y, width, height)
min_match_count(int): 最小匹配数量
maxcnt(int): 匹配的最大数量
Returns:
A list of found [{"result": point, "rectangle": rectangle, "confidence": 0.76}, ...]
rectangle is a 4 points list
'''
try:
sift = cv2.SIFT_create(sigma=1.3)
flann = cv2.FlannBasedMatcher({'algorithm': 1, 'trees': 5}, dict(checks=50))
g1 = cv2.cvtColor(small_img, cv2.COLOR_BGR2GRAY)
if roi:
x, y, width, height = roi
roi_img = big_img[y:y + height, x:x + width]
g2 = cv2.cvtColor(roi_img, cv2.COLOR_BGR2GRAY)
else:
g2 = cv2.cvtColor(big_img, cv2.COLOR_BGR2GRAY)
g1 = cv2.adaptiveThreshold(g1, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 21, 2)
g2 = cv2.adaptiveThreshold(g2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 21, 2)
kp_sch, des_sch = sift.detectAndCompute(g1, None)
if len(kp_sch) < min_match_count:
return None
kp_src, des_src = sift.detectAndCompute(g2, None)
if len(kp_src) < min_match_count:
return None
result = []
while True:
matches = flann.knnMatch(des_sch, des_src, k=2)
good = []
for m, n in matches:
if m.distance < 0.75 * n.distance:
good.append(m)
if len(good) < min_match_count:
break
sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
img_pts = np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
matches_mask = mask.ravel().tolist()
h, w = small_img.shape[:2]
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
try:
if roi:
dst = cv2.perspectiveTransform(pts, M) + [x, y]
else:
dst = cv2.perspectiveTransform(pts, M)
except:
break
pypts = []
for npt in dst.astype(int).tolist():
pypts.append(tuple(npt[0]))
#保留小数点后两位
sim = round(min(1.0 * matches_mask.count(1) / len(matches_mask), 1.0),2)
X = (pypts[0][0] + pypts[1][0] + pypts[2][0] + pypts[3][0]) // 4
Y = (pypts[0][1] + pypts[1][1] + pypts[2][1] + pypts[3][1]) // 4
middle_point = (X, Y)
# skip results whose centre lies outside the big image (the original chained comparison "0 > x > w" could never be True)
if not (0 <= middle_point[0] <= big_img.shape[1] and 0 <= middle_point[1] <= big_img.shape[0]):
continue
result.append(dict(
result=middle_point,
rectangle=pypts,
confidence=sim,
dst_pot=[np.int32(dst)]
))
if maxcnt and len(result) >= maxcnt:
break
qindexes, tindexes = [], []
for m in good:
qindexes.append(m.queryIdx)
tindexes.append(m.trainIdx)
def filter_index(indexes, arr):
r = []
for i, item in enumerate(arr):
if i not in indexes:
r.append(item)
return np.array(r)
kp_src = filter_index(qindexes, kp_src)
des_src = filter_index(tindexes, des_src)
return result
except:
return None
if __name__ == '__main__':
big_img = cv2.imdecode(np.fromfile(file=r"datas/max_map/蒙德1.png", dtype=np.uint8), cv2.IMREAD_COLOR) # 加载大图
small_img = cv2.imdecode(np.fromfile(file=r"datas/img/1704095733.5493054_8.png", dtype=np.uint8), cv2.IMREAD_COLOR)
ret = findImgALL(big_img, small_img)  # findImgALL has no threshold parameter; call it with its actual signature
#ret = ac.find_all_template(big_img, small_img, threshold=0.5, rgb=True, bgremove=True)
#aaaa(big_img,small_img,sim=0.6)
#图片查找_特征匹配_最佳(big_img,small_img)
result=ret[0]
for result in ret:
print(result)
pos = result['rectangle']
confidence = result['confidence']
cv2.polylines(big_img, result['dst_pot'], True, (0, 255, 0), 2)
# 定义点的颜色和半径
color = (0, 0, 255) # 红色
radius = 5
# 绘制点
cv2.circle(big_img, result["result"], radius, color, -1)
# 显示匹配结果
cv2.imshow('Matched Image', big_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
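One thing the demo above does not exercise is the roi parameter of findImgALL, which restricts feature detection to a sub-window of the big image and maps the result back to full-map coordinates. Below is a minimal editor-added sketch; the module import, the minimap file name and the ROI values are assumptions for illustration, and images are loaded via np.fromfile + cv2.imdecode (as in the project's own __main__) so that non-ASCII Windows paths still work.
# Editor-added sketch, not project code: search only inside a region of interest
import cv2
import numpy as np
from findimg import findImgALL  # assumes findimg.py is importable as a module

big_img = cv2.imdecode(np.fromfile(r"datas/max_map/蒙德1.png", dtype=np.uint8), cv2.IMREAD_COLOR)
small_img = cv2.imdecode(np.fromfile(r"datas/img/minimap.png", dtype=np.uint8), cv2.IMREAD_COLOR)  # hypothetical path

# only search an 800x600 window whose top-left corner sits at (1000, 500) in the big map
ret = findImgALL(big_img, small_img, roi=(1000, 500, 800, 600), min_match_count=10, maxcnt=1)
if ret:
    print(ret[0]["result"], ret[0]["confidence"])  # centre point is already mapped back to full-map coordinates
else:
    print("no reliable match inside the ROI")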
3、Keyboard and mouse event listener
This code implements a simple keyboard and mouse event listener that captures the user's key presses, mouse clicks and scroll events. It defines a class named HookKeyMose which offers two listening modes: a blocking mode (listen_key_block) and non-blocking modes (listen_key_nblock and listen_mouse_nblock). The blocking mode suspends the program while it listens, whereas the non-blocking modes start a listener thread in the background so the main thread can keep doing other work.
Main methods of the class
Constructor (__init__): decides from its arguments whether to start the mouse and/or keyboard listeners.
Key event handlers (on_press, on_release): called when a key is pressed or released respectively; the default implementations do nothing.
Mouse event handlers (on_move, on_click, on_scroll): handle mouse movement, clicks and scrolling respectively; the default implementations do nothing.
Blocking keyboard listening (listen_key_block): starts a keyboard.Listener and blocks the current thread until listening ends.
Non-blocking keyboard listening (listen_key_nblock): starts a keyboard.Listener as a new thread, so the current thread is not blocked.
Non-blocking mouse listening (listen_mouse_nblock): starts a mouse.Listener, likewise as a new thread.
Main block (if __name__ == '__main__':)
The main block runs an infinite loop solely to keep the main thread alive, because once the main thread exits its child (listener) threads exit with it. As written, no listener instance is ever created, so nothing is actually being listened to; to enable listening you need to instantiate (or subclass) HookKeyMose in __main__ and call the corresponding listen method, as in the sketch after the code below.
vi lanrentools/hookkeymose.py
# -*- coding: utf-8 -*-
from pynput import keyboard
from pynput import mouse
class HookKeyMose:
def __init__(self,hookMouse=False,hookKey=True):
'''
可通过集成本类 然后重写on_函数来实现hook回返
'''
if hookMouse==True:
self.listen_mouse_nblock()
if hookKey==True:
self.listen_key_nblock()
def on_press(self,key):
"""定义按下时候的响应,参数传入key"""
pass
def on_release(self,key):
"""定义释放时候的响应"""
pass
#print(f'{key} up')
def on_move(self,x, y):
pass
#print('move to', x, y)
def on_click(self,x, y, button, pressed):
pass
#print('click at', x, y, button, pressed)
def on_scroll(self,x, y, dx, dy):
pass
#print('scroll at', x, y, 'by', dx, dy)
# 监听写法1
def listen_key_block(self):
with keyboard.Listener(
on_press=self.on_press, on_release=self.on_release) as listener:
listener.join() # 加入线程池,阻塞写法
# 监听写法2
def listen_key_nblock(self):
self.listener_keyboard = keyboard.Listener(
on_press=self.on_press, on_release=self.on_release
)
self.listener_keyboard .start() # 启动线程
def listen_mouse_nblock(self):
self.listener_mouse = mouse.Listener(
on_move=None, # 因为on_move太多输出了,就不放进来了,有兴趣可以加入
on_click=self.on_click,
on_scroll=self.on_scroll
)
self.listener_mouse.start()
if __name__ == '__main__':
while True: # 这里应该用一个循环维持主线程,否则主线程结束了子线程就自动结束了
pass
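As noted above, this __main__ block only keeps the process alive and never starts a listener. Here is a minimal editor-added sketch of how the class might actually be used; the hotkey and the module path are assumptions, and the pynput listener threads are daemons, which is why the main thread has to stay alive.
# Editor-added sketch, not project code: subclass HookKeyMose and override the callbacks you need
import time
from pynput.keyboard import Key
from lanrentools.hookkeymose import HookKeyMose  # assumes the lanrentools package layout shown above

class MyHook(HookKeyMose):
    def on_release(self, key):
        if key == Key.f9:                  # arbitrary hotkey chosen for this example
            print("F9 released - handle a stop request here")

    def on_click(self, x, y, button, pressed):
        if pressed:
            print(f"mouse {button} pressed at ({x}, {y})")

if __name__ == '__main__':
    MyHook(hookMouse=True, hookKey=True)   # the constructor starts the non-blocking listeners
    while True:                            # keep the main thread alive so the listener threads keep running
        time.sleep(1)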