import requests
import re
from readImage import read_captcha, html_to_playList, html_to_playOne, html_to_playList2
from baidu import parse
from datetime import datetime
import schedule
import time
from mylog import logger
from requests.exceptions import ConnectTimeout
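
# Simple scraper client for 1080zyk1.com. Login() downloads the search captcha,
# has baidu.parse read it, and stores the resulting PHPSESSID; search(), playList()
# and playOne() reuse that session and hand the HTML to the readImage helpers.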


class MyRequest:
    base_url_img = "https://1080zyk1.com/inc/common/code.php?a=search&s="
    base_url_img_submit = (
        "https://1080zyk1.com/inc/ajax.php?ac=code_check&code=sss&type=search"
    )
    base_url_search = "https://1080zyk1.com/index.php?m=vod-search"
    base_url = "https://1080zyk1.com"
    # PHPSESSID of the current session; refreshed by Login()
    id = "2iesssc4631e1jrq3tr3eh7qm2"
    # number of captcha retries so far
    repeat = 0

    @classmethod
    def taskSchedule(cls):
        # run taskOne once a minute, forever
        schedule.every(1).minutes.do(cls.taskOne)
        while True:
            # run all pending scheduled jobs
            schedule.run_pending()
            time.sleep(1)

    @classmethod
    def taskOne(cls):
        # scheduled task: hit the home page with the stored session cookie
        try:
            cookies = {"PHPSESSID": cls.id}
            response = requests.get(cls.base_url, cookies=cookies)
            # get the current time
            current_time = datetime.now()
            # format it as a timestamp string
            formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
            print(formatted_time + "定时任务")
        except ConnectTimeout as e:
            logger.error("定时任务超时:" + str(e))
        except Exception as e:
            logger.error("定时任务:" + str(e))

    @classmethod
    def Login(cls):
        # fetch a fresh captcha image; the response sets a new PHPSESSID
        response = requests.get(cls.base_url_img, stream=True)
        received_cookies = response.cookies
        cls.id = received_cookies["PHPSESSID"]
        # save the captcha locally and let baidu.parse read the code out of it
        with open("c:\\image.jpg", "wb") as file:
            file.write(response.content)
        image_key = parse("c:\\image.jpg")
        # image_key = read_captcha("d:\\image.jpg")
        print(image_key)
        # image_key = read_captcha(response.raw)
        # submit the recognised code; the site answers with 1002 when it is wrong
        url = cls.base_url_img_submit.replace("sss", image_key)
        cookies = {"PHPSESSID": cls.id}
        response = requests.get(url, cookies=cookies)
        content = response.content.decode("utf-8")
        if "1002" in content:
            print("验证码错误" + str(cls.repeat))
            cls.repeat = cls.repeat + 1
            cls.Login()

    @classmethod
    def search(cls, key):
        form_data = {
            "wd": key,
        }
        cookies = {"PHPSESSID": cls.id}
        response = requests.post(cls.base_url_search, data=form_data, cookies=cookies)
        html_bytes = response.content
        html_text = html_bytes.decode("utf-8")
        # pull every <a href="...">text</a> pair out of the result page
        matches = re.findall(r'<a\s.*?href="(.*?)"[^>]*>(.*?)</a>', html_text)
        # result links sit between the "福利" and "采集赞助" anchors on the page
        first = "福利"
        end = "采集赞助"
        begin = False
        result = []
        for match in matches:
            href, movie_name = match
            if end in movie_name:
                begin = False
            if begin:
                # print(f"电影名称: {movie_name}, href属性: {href}")
                result.append({"name": movie_name, "url": href})
            if first in movie_name:
                begin = True
        if len(result) == 0:
            # if the page asks for a captcha instead of results, log in again
            if "请输入验证码" in html_text:
                cls.Login()
                # result = cls.search(key)
        return result

    @classmethod
    def playList(cls, url):
        # fetch the detail page and delegate parsing to readImage.html_to_playList
        response = requests.get(cls.base_url + url)
        html_bytes = response.content
        html_text = html_bytes.decode("utf-8")
        ds = html_to_playList(html_text)
        # for d in ds:
        #     print(d.get("url"), d.get("text"))
        return ds

    @classmethod
    def playList2(cls, url):
        # same as playList, but parsed with the html_to_playList2 helper
        response = requests.get(cls.base_url + url)
        html_bytes = response.content
        html_text = html_bytes.decode("utf-8")
        ds = html_to_playList2(html_text)
        # for d in ds:
        #     print(d.get("url"), d.get("text"))
        return ds

    @classmethod
    def playOne(cls, url, title):
        # fetch the play page, work out the episode number from the title,
        # and delegate parsing to readImage.html_to_playOne
        response = requests.get(cls.base_url + url)
        html_bytes = response.content
        html_text = html_bytes.decode("utf-8")
        series = cls.jishu(title)
        ds = html_to_playOne(html_text, series)
        print(ds)
        return ds

    @classmethod
    def jishu(cls, input_string):
        # extract the episode number from a title such as "第02集"
        pattern = r"第(\d+)集"
        # match the pattern against the input string
        match = re.search(pattern, input_string)
        # pull out the captured digits, if any
        if match:
            episode_number = match.group(1)
            print("提取到的集数为:", episode_number)
            return episode_number
        else:
            print("未找到匹配的数据")
            return None


if __name__ == "__main__":
    txt = MyRequest.jishu("繁花 zykyun 第02集")
    print(txt)
    # MyRequest.Login()
    # result = MyRequest.search("复仇者联盟")
    # result = MyRequest.playList(result[0].get("url"))
    # MyRequest.playOne(result[0].get("url"))
    # MyRequest.playList("/?m=vod-detail-id-25741.html")
    # MyRequest.playOne("/?m=vod-play-id-25741-src-2-num-1.html")
    # response = requests.get(MyRequest.base_url_img, stream=True)
    # with open("d:\\image.jpg", "wb") as file:
    #     file.write(response.content)
    # image_key = read_captcha("d://image.jpg")
    # print(image_key)
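
    # A minimal end-to-end sketch of how the calls above appear meant to chain
    # together (an assumption based on the commented examples, not part of the
    # original flow): log in, search, take the first hit, list its episodes,
    # then resolve one play URL. Left commented out so the module still only
    # runs the jishu() check by default.
    # MyRequest.Login()
    # hits = MyRequest.search("繁花")
    # if hits:
    #     episodes = MyRequest.playList(hits[0].get("url"))
    #     if episodes:
    #         ep = episodes[0]
    #         MyRequest.playOne(ep.get("url"), ep.get("text"))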