KanJiangRBao.py 4.78 KB
Cwoner committed on 2024-06-03 09:44 · first commit
# -*-coding:utf-8 -*-
# @Time: 2023/2/20 14:33
# @Author: Cwoner
# @Organization: CTIM
# @Software: PyCharm
import time
import requests
from lxml import etree
import hashlib
from setting import SAVEPATH
import os
import re
from tools.serverAPI import upload_file
from tools.give_an_alarm import send_email
class KanJiangRBao:
    def __init__(self, ccdate, id=''):  # ccdate format: 'YYYYMMDD', e.g. '20230101'
        self.url = f'http://paper.gdzjdaily.com.cn/html/{ccdate[:4]}-{ccdate[4:6]}/{ccdate[6:]}/node_1.htm'
        self.ccdate = ccdate
        self.id = id
        self.name = '湛江日报'
        print('Initializing:', self.name, self.url)
        # mid: md5 of the paper name plus a fixed suffix, used as this paper's storage directory name.
        self.mid = hashlib.md5((self.name + '_baozi').encode()).hexdigest()
        if not os.path.isdir(SAVEPATH + self.mid):
            os.mkdir(SAVEPATH + self.mid)
    def index(self, c=0):
        """Fetch the front page (node_1.htm) and walk the page list; c counts retries."""
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
        }
        try:
            response = requests.get(self.url, headers=headers, verify=False, timeout=10)
        except requests.RequestException:
            c += 1
            if c > 2:
                # send_email(f'Newspaper collection system error-{self.name}-{int(time.time())}', f'Front-page request failed more than 3 times; collection aborted. Paper: {self.name}. mid: {self.mid}. Date: {self.ccdate}.')
                return
            return self.index(c)
        if response.status_code == 200:
            text = response.content.decode('utf-8')
            html = etree.HTML(text)
            # Each row of the page-navigation table describes one page (版) of the issue.
            bans = html.xpath('//table[@id="bmdhTable"]//tr')
            i = 0
            for banItem in bans:
                titles = banItem.xpath('.//a[@id="pageLink"]/text()')
                title = str(i) + titles[0] if titles else f'Untitled page {i}'
                print(self.name, title)
                burls = banItem.xpath('.//td[2]/a/@href')
                if not burls:
                    continue
                burl = burls[0]
                print(burl)
                if 'pdf' in burl:
                    pdf_url = 'http://paper.gdzjdaily.com.cn/' + burl
                else:
                    continue
                # tid is deterministic for a given paper, page title and date.
                tid = hashlib.md5((self.name + title + self.ccdate).encode()).hexdigest()
                file = tid + '.pdf'
                if pdf_url:
                    pdf_url = pdf_url.replace('../../..', '')
                    print(pdf_url)
                    self.__download(file, pdf_url, title, tid)
                i += 1
        else:
            send_email(f'Newspaper collection system error-{self.name}-{int(time.time())}',
                       f'Unexpected front-page status code: {response.status_code}. '
                       f'Requested front page: {self.url}. Paper: {self.name}. mid: {self.mid}. Date: {self.ccdate}.')
    def __download(self, file, url, title, tid, c=0):
        """Download one page PDF and hand it to the uploader; c counts retries."""
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
        }
        try:
            response = requests.get(url, headers=headers, verify=False, timeout=10)
        except requests.RequestException:
            c += 1
            if c > 2:
                send_email(f'Newspaper collection system error-{self.name}-{int(time.time())}', f'Page download failed more than 3 times; collection of this page has been aborted. Page URL: {url}. Paper: {self.name}. mid: {self.mid}. Date: {self.ccdate}.')
                return
            return self.__download(file, url, title, tid, c)
        if response.status_code == 200:
            # PDFs are stored under SAVEPATH/<paper mid>/<date>/.
            path = SAVEPATH + self.mid + '/' + self.ccdate + '/'
            if not os.path.isdir(path):
                os.mkdir(path)
            with open(path + file, 'wb') as f:
                f.write(response.content)
            data = {
                'entity_id': self.mid,
                'title': title,
                'tid': tid,
                'file_name': file,
                'origin_url': url,
                'ndate': self.ccdate[:4] + '-' + self.ccdate[4:6] + '-' + self.ccdate[6:]
            }
            upload_file(data, response.content)
    def run(self):
        self.index()


if __name__ == '__main__':
    ccdate = '20240408'
    hr = KanJiangRBao(ccdate)
    hr.run()
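The script imports three project-local pieces that are not part of this file: SAVEPATH from setting, upload_file from tools.serverAPI, and send_email from tools.give_an_alarm. Below is a minimal sketch of stand-in versions, inferred only from how they are called above; the module names are kept, but their bodies here are assumptions intended for running the scraper locally, not the real CTIM implementations.

# setting.py -- stand-in config. SAVEPATH is concatenated directly with the md5
# directory name, so it should end with a path separator and must already exist.
SAVEPATH = './papers/'

# tools/serverAPI.py -- stand-in uploader. The real upload_file presumably pushes the
# PDF bytes and metadata to an ingestion service; this one only logs the call.
def upload_file(data, content):
    print('upload_file:', data['file_name'], data['title'], len(content), 'bytes')

# tools/give_an_alarm.py -- stand-in alerter. The real send_email presumably mails an
# operator; this one only prints the alert.
def send_email(subject, body):
    print('ALARM:', subject, '-', body)

With those three files in place and the ./papers/ directory created, running python KanJiangRBao.py fetches the 2024-04-08 issue and stores each page PDF under ./papers/<mid>/20240408/.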