An automatic chat robot implemented in Python

I thought how nice it would be for my wife to get the weather forecast over WeChat every morning, so while following an online course I wrote a program that sends the forecast at a scheduled time (a scheduling sketch is included after the weather function in the code below).
Recently I put in some more effort and made a more detailed version, but it requires manual operation.
See the screenshots for the specific operation.
Partial code:
#coding=utf8
import requests
from requests import exceptions
from urllib.request import urlopen
from bs4 import BeautifulSoup
from urllib.parse import urlencode
from threading import Timer
import re
from wxpy import *
import schedule
import time
import http
import json
import datetime
import random

bot = Bot(cache_path=True, console_qr=1)       # log in by scanning the QR code printed in the console
myself = bot.self
bot.enable_puid('wxpy_puid.pkl')               # persistent puid so friends/groups keep stable ids
tuling = Tuling(api_key='YOUR_TULING_KEY')     # Tuling chatbot API key (placeholder)
group = bot.groups().search(u'Test')           # group used for broadcasting
shgroup = bot.groups().search('GROUP_NAME')    # second group (placeholder name)
friends = bot.friends().search(u'Lie')         # friend(s) to push messages to
msgText = "Helo!    '  '      
1. ( : )
2. nba( : )
3.
4.
5. ( )
6.
7.
8.
9.nba ( : )
10.
1.
2.
3.
4.
11. ( : )" # newText = " :
1.
2.
3.
4. " def get_now_weather(city): header = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.8', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235' } url = 'https://free-api.heweather.com/s6/weather/now?location='+city+'&key= key' PMurl = 'https://free-api.heweather.com/s6/air/now?parameters&location='+city+'&key= key' # , timeout = random.choice(range(80, 180)) rep = requests.get(url, headers=header, timeout=timeout) pm = requests.get(PMurl, headers=header, timeout=timeout) result = '' temp = rep.json() temp = temp['HeWeather6'][0] update = temp['update'] now = temp['now'] nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') pm = pm.json() pm = pm['HeWeather6'][0] print(now) airnow = pm['air_now_city'] result = city + ' -' + '
'\ + ' :'+ update['loc'] + '
'\ + ' :'+ now['cond_txt'] + '
'\ + ' :'+ now['tmp'] + '°C' + '
'\ + ' :'+ now['fl'] + '°C' + '
'\ + ' :'+ now['wind_dir'] + ' ' + now['wind_sc'] + ' '+ now['wind_spd'] + ' / '+ '
'\ + ' :'+ now['hum'] + '%' + '
'\ + ' :'+ now['pcpn'] + 'ml' + '
'\ + ' :'+ now['vis'] + ' ' + '
'\ + ' :'+ now['cloud'] + '
'\ + '-----------------------------------' + '
'\ + ' :'+'
'\ + ' :'+ airnow['aqi']+'
'\ + ' :'+ airnow['main']+'
'\ + ' :'+ airnow['qlty']+'
'\ + ' :'+ airnow['no2']+'
'\ + ' :'+ airnow['so2']+'
'\ + ' :'+ airnow['co']+'
'\ + ' pm10 :'+ airnow['pm10']+'
'\ + ' pm25 :'+ airnow['pm25']+'
'\ + ' :'+ airnow['o3']+'
' result = result + ' :' + nowTime + '
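# ---- Scheduling sketch (assumption, not from the original post) --------------
# The introduction says the forecast is pushed to a friend at a fixed time every
# morning, but that part is not shown in this excerpt.  One minimal way to do it
# with the `schedule` module imported above; the 07:30 send time and the city
# 'beijing' are placeholder values.
def send_morning_weather():
    friend = bot.friends().search(u'Lie')[0]       # same friend searched in the setup
    friend.send(get_now_weather('beijing'))

schedule.every().day.at('07:30').do(send_morning_weather)
# Run the pending jobs in a loop alongside the bot, e.g.:
#     while True:
#         schedule.run_pending()
#         time.sleep(30)
# -------------------------------------------------------------------------------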
def get_news(type):
    # Fetch headlines of the given type from the juhe.cn news API.
    url = 'http://v.juhe.cn/toutiao/index?type=' + str(type) + '&key=KEY'   # KEY: juhe API key placeholder
    timeout = random.choice(range(80, 180))
    rep = requests.get(url, headers=header, timeout=timeout)
    data = json.loads(rep.text)['result']['data']
    html = str(type) + ' news:\n'
    for i in data:
        html = (html + 'Title: ' + i['title'] + '\n'
                + 'Link: ' + i['url'] + '\n'
                + 'Category: ' + i['category'] + '\n'
                + 'Author: ' + i['author_name'] + '\n'
                + 'Date: ' + i['date'] + '\n'
                + '-----------------------------------------------\n\n')
    return html
def get_star(name):
    # Fetch today's horoscope for the given constellation from the juhe.cn API.
    url = 'http://web.juhe.cn:8080/constellation/getAll?consName=' + str(name) + '&type=today&key=KEY'   # KEY: juhe API key placeholder
    timeout = random.choice(range(80, 180))
    rep = requests.get(url, headers=header, timeout=timeout)
    data = json.loads(rep.text)
    starhtml = (str(name) + ' today:\n'
                + 'Overall: ' + data['all'] + '\n'
                + 'Lucky color: ' + data['color'] + '\n'
                + 'Health: ' + data['health'] + '\n'
                + 'Love: ' + data['love'] + '\n'
                + 'Money: ' + data['money'] + '\n'
                + 'Best match: ' + data['QFriend'] + '\n'
                + 'Work: ' + data['work'] + '\n'
                + 'Summary: ' + data['summary'] + '\n')
    return starhtml
def get_nba():
    # Scrape today's NBA games from the hupu.com mobile site.
    resp = urlopen('https://m.hupu.com/nba/game')
    soup = BeautifulSoup(resp, 'html.parser')
    tagToday = soup.find('section', class_="match-today")
    nbaHtml = "Today's NBA games:\n\n"
    for tag in tagToday.find_all('a', class_='match-wrap'):
        nbaHtml = (nbaHtml + tag.find('div', class_='away-team').span.get_text() + ' '
                   + tag.find('strong', class_='').span.get_text() + ' '
                   + tag.find('div', class_='home-team').span.get_text()
                   + ' (' + tag.find('div', class_='match-status-txt').get_text() + ')\n')
    return nbaHtml
def get_rank():
    # Scrape the NBA Eastern and Western Conference standings from hupu.com.
    resp = urlopen('https://m.hupu.com/nba/stats')
    soup = BeautifulSoup(resp, 'html.parser')
    east = soup.find_all('li', class_="weast")[0]
    west = soup.find_all('li', class_="weast")[1]
    rankHtml = 'NBA Eastern Conference standings (rank / team / wins / losses / win rate):\n\n'
    for tag in east.find_all('li', class_=''):
        cols = tag.find('p', class_='right-data')
        rankHtml = (rankHtml + tag.find('span', class_='rank').get_text() + '. '
                    + tag.find('div', class_='').h1.get_text() + ' '
                    + cols.find_all('span')[0].get_text() + ' '
                    + cols.find_all('span')[1].get_text() + ' '
                    + cols.find_all('span')[2].get_text() + '\n')
    rankHtml = rankHtml + '\n\n---------------------------------------------\n\n'
    rankHtml = rankHtml + 'NBA Western Conference standings (rank / team / wins / losses / win rate):\n\n'
    for tag in west.find_all('li', class_=''):
        cols = tag.find('p', class_='right-data')
        rankHtml = (rankHtml + tag.find('span', class_='rank').get_text() + '. '
                    + tag.find('div', class_='').h1.get_text() + ' '
                    + cols.find_all('span')[0].get_text() + ' '
                    + cols.find_all('span')[1].get_text() + ' '
                    + cols.find_all('span')[2].get_text() + '\n')
    return rankHtml
def invite(user):
    # Pull the given user into the group named 'cc' by invitation.
    print('4')  # debug marker kept from the original
    group = bot.groups().search('cc')
    group[0].add_members(user, use_invitation=True)

# Handlers registered for friend requests, the two groups and the searched friends
# (the decorated handler itself is not part of this excerpt).
@bot.register(msg_types=FRIENDS)
@bot.register(group)
@bot.register(shgroup, TEXT)
@bot.register(friends)
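The excerpt stops at the @bot.register(...) decorators, so the handler they decorate is not shown. Below is a minimal, hypothetical sketch of such a handler; the function name auto_reply, the 'nba'/'weather' keywords and the fallback to the Tuling chatbot are assumptions, not the author's original code:

def auto_reply(msg):
    # hypothetical handler for the registrations above
    if msg.type == FRIENDS:
        new_friend = msg.card.accept()   # accept the friend request
        new_friend.send(msgText)         # greet the new friend with the menu
        return
    text = (msg.text or '').strip()
    if text == 'nba':
        return get_nba()
    if text.startswith('weather '):
        return get_now_weather(text.split(' ', 1)[1])
    tuling.do_reply(msg)                 # everything else goes to the Tuling chatbot

bot.join()  # keep the process alive and handling messages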

If you like it, add me on WeChat (wex5201314) with the verification message [py] and I will pull you into the group to try it out; follow the official account [이야기 필름] to get the full source code.
Or scan the QR code directly.
