Python + ZhipuAI: automatic replies to DingTalk messages

The script switches to the DingTalk window, takes a screenshot and OCRs the unread message, sends the text to the language model, and pastes the answer back into the DingTalk chat box. I'm taking a shortcut and just posting the code; I'll keep improving the comments over time. If anything is hard to follow, feel free to get in touch and discuss.
import time
import pygetwindow
from PIL import ImageGrab, Image
import cv2
import numpy as np
import pyautogui
import easyocr
import os
import pytesseract
import zhipuai
from zhipuai import ZhipuAI
import pyperclip
import tkinter as tk
def openchat(xm, ym):
    # Move to the red-badge position and click it to open that conversation.
    pyautogui.moveTo(xm, ym, duration=1)
    time.sleep(2)
    pyautogui.click(xm, ym)
def watchtext(imgurl):
    # OCR the screenshot: a quick full-image pass with Tesseract, then EasyOCR
    # on the cropped chat area (the crop offsets are tuned for my screen size).
    print('识别图片')
    image = cv2.imread(imgurl)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    pytesseract.pytesseract.tesseract_cmd = r'D:\Program Files\Tesseract-OCR\tesseract.exe'
    text = pytesseract.image_to_string(thresh, lang='chi_sim')
    print(text)
    # Crop the region that holds the conversation and run EasyOCR on it.
    img = Image.open(imgurl)
    img_size = img.size
    x = 0.20 * img_size[0] + 200
    y = 0.1 * img_size[1]
    w = 1 * img_size[0] - 400
    h = 1 * img_size[1] - 720
    cropped = img.crop((x, y, x + w, y + h))
    cropped.save('test01.png')
    reader = easyocr.Reader(['ch_sim', 'en'], gpu=False, verbose=False)
    result = reader.readtext('test01.png', detail=0)
    for i in result:
        print(i, end='')
    return result
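The crop box inside watchtext() (0.20 × width + 200 from the left, 400 px trimmed off the right, 720 px off the bottom) is tuned to my own resolution and DingTalk layout, so it will very likely need adjusting on another machine. A small sketch for previewing the crop on any screenshot the script has saved (the file name here is just an example):

from PIL import Image

img = Image.open('desktop_screenshot.png')   # any screenshot saved by the script
width, height = img.size

# Same crop math as watchtext(); tweak the offsets until only the chat area remains.
x = 0.20 * width + 200
y = 0.1 * height
w = width - 400
h = height - 720
img.crop((x, y, x + w, y + h)).show()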
def getmscreen():
    # Bring the DingTalk window to the front ('XXXX' is a placeholder for its title)
    # and grab a screenshot of the opened chat.
    windowsjiantou = pygetwindow.getWindowsWithTitle('XXXX')
    windowsjiantou[0].show()
    w = windowsjiantou[0]
    w.activate()
    left, top, width, height = w.left, w.top, w.width, w.height
    w.activate()
    w.show()
    w.maximize()
    time.sleep(0.5)
    print('运行到了这里')
    screenshot = ImageGrab.grab(bbox=(left, top, left + width, top + height))
    timestamp = time.time()
    print("当前时间戳:", timestamp)
    # What actually gets saved (and returned) is a full-screen pyautogui screenshot.
    imgurl = str(timestamp) + 'desktop_screenshot.png'
    img = pyautogui.screenshot()
    img.save(imgurl)
    return imgurl
def getchat(questiontext):
    # Send the OCR'd question to ZhipuAI (glm-4) with a knowledge-base retrieval tool
    # and collect the streamed reply into a single string.
    print(questiontext)
    client = ZhipuAI(api_key=" . ")   # fill in your own API key
    response = client.chat.completions.create(
        model="glm-4",
        messages=[
            {"role": "user", "content": questiontext},
        ],
        tools=[
            {
                "type": "retrieval",
                "retrieval": {
                    "knowledge_id": " ",   # fill in your knowledge base id
                    "prompt_template": "从文档\n\"\"\"\n{{knowledge}}\n\"\"\"\n中找问题\n\"\"\"\n{{question}}\n\"\"\"\n的答案,找到答案就仅使用文档语句回答问题,找不到答案就用自身知识回答并且告诉用户该信息不是来自文档。\n不要复述问题,直接开始回答。",
                },
            }
        ],
        stream=True,
    )
    resstr = ""
    for chunk in response:
        content = chunk.choices[0].delta.content
        if content:   # the final chunk's delta can be empty
            resstr = resstr + content
    print(resstr)
    return resstr
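If you have not created a knowledge base yet, you can first check that the model call itself works without the retrieval tool. A minimal non-streaming sketch against the same zhipuai SDK (the api_key value is a placeholder you must replace):

from zhipuai import ZhipuAI

client = ZhipuAI(api_key="your-api-key")   # placeholder, use your own key
response = client.chat.completions.create(
    model="glm-4",
    messages=[{"role": "user", "content": "你好,请介绍一下你自己"}],
)
print(response.choices[0].message.content)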
def pasttext(text):
    # Switch back to the DingTalk window and paste the answer into the input box.
    # Note: the text is only pasted, not sent.
    windowsjiantou = pygetwindow.getWindowsWithTitle('XXXX')
    windowsjiantou[0].show()
    w = windowsjiantou[0]
    w.activate()
    pyautogui.moveTo(600, 900)   # position of the chat input box (screen-specific)
    pyautogui.click()
    pyperclip.copy(text)
    time.sleep(0.5)
    pyautogui.hotkey('ctrl', 'v')
def capture():
    # List every window title (handy for finding the exact DingTalk title), then
    # bring the DingTalk window ('XXXX' is a placeholder for its title) to the front.
    desktop_window = pygetwindow.getAllWindows()
    desktop_window_title = pygetwindow.getAllTitles()
    for window in desktop_window_title:
        print(window)
    windowsjiantou = pygetwindow.getWindowsWithTitle('XXXX')
    windowsjiantou[0].show()
    w = windowsjiantou[0]
    w.activate()
    left, top, width, height = w.left, w.top, w.width, w.height
    w.activate()
    w.show()
    w.maximize()
    time.sleep(0.5)
    print('运行到了这里')
    # Screenshot the window and look for the red unread-message badge.
    screenshot = ImageGrab.grab(bbox=(left, top, left + width, top + height))
    timestamp = time.time()
    print("当前时间戳:", timestamp)
    screenshot.save(str(timestamp) + 'desktop_screenshot.png')
    imgs = str(timestamp) + 'desktop_screenshot.png'
    image = cv2.imread(imgs)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Red occupies both ends of the HSV hue range, so two masks are combined.
    lower_red1 = np.array([0, 120, 70])
    upper_red1 = np.array([10, 255, 255])
    lower_red2 = np.array([170, 120, 70])
    upper_red2 = np.array([180, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red1, upper_red1)
    mask2 = cv2.inRange(hsv, lower_red2, upper_red2)
    mask = cv2.bitwise_or(mask1, mask2)
    # Clean up noise, then keep roughly circular red blobs (the unread badges).
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.dilate(mask, kernel, iterations=1)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    myusecolours = []
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 50:
            print('面积大于50')
            perimeter = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.04 * perimeter, True)
            if len(approx) < 10:
                (x, y), radius = cv2.minEnclosingCircle(contour)
                center = (int(x), int(y))
                radius = int(radius)
                if radius > 5:
                    print('绘制了一个图形print')
                    cv2.circle(image, center, radius, (0, 255, 0), 2)
                    print(contour)
                    myusecolours.append(contour)
    print('----')
    myusecolours02 = myusecolours
    myusecolours02.reverse()
    print(len(myusecolours02))
    if len(myusecolours02) == 0:
        return
    # Handle one badge: click it, screenshot the opened chat, OCR it, take the last
    # recognized line as the question, ask the model, and paste the answer back.
    contoursmsg = myusecolours02[-1]
    x, y, w, h = cv2.boundingRect(contoursmsg)
    print(f"Bounding box coordinates: x={x}, y={y}, w={w}, h={h}")
    (xm, ym), radius = cv2.minEnclosingCircle(contoursmsg)
    print(f"Badge centre: x={xm}, y={ym}")
    openchat(xm, ym)
    imgurl = getmscreen()
    textcontent = watchtext(imgurl)
    textcontent02 = ''
    for item in textcontent:
        print(item + '\n')
        textcontent02 = textcontent02 + item
    textcontent.reverse()
    textcontent01 = textcontent[0]   # last OCR line = the newest message
    answer = getchat(textcontent01)
    pasttext(answer)
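The two red ranges in capture() exist because red sits at both ends of the HSV hue axis (roughly 0–10 and 170–180); the saturation/value floors of 120 and 70 are values that worked on my screen and may need tuning for other themes. A small sketch for eyeballing what the mask actually catches on a saved screenshot (the file name is an example):

import cv2
import numpy as np

image = cv2.imread('desktop_screenshot.png')   # any screenshot saved by the script
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask1 = cv2.inRange(hsv, np.array([0, 120, 70]), np.array([10, 255, 255]))
mask2 = cv2.inRange(hsv, np.array([170, 120, 70]), np.array([180, 255, 255]))
mask = cv2.bitwise_or(mask1, mask2)

# White pixels are what capture() treats as an unread-message badge.
cv2.imshow('red mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()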
def say_hello():
    capture()
if __name__ == '__main__':
    capture()
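As written, the entry point handles one unread badge and exits. If you want the script to keep watching for new messages, one option is to swap the entry point for a simple polling loop; a sketch, with the 30-second interval being an arbitrary choice:

if __name__ == '__main__':
    # Check for unread-message badges roughly every 30 seconds.
    while True:
        try:
            capture()
        except Exception as e:
            print('capture() failed this round:', e)
        time.sleep(30)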