# NOTE: The three lines below are Gitee web-page residue accidentally captured
# into this file; commented out so the module remains valid Python.
# 代码拉取完成,页面将自动刷新
# 同步操作将从 Cwoner/ctim_newspaper_spider 强制同步,此操作会覆盖自 Fork 仓库以来所做的任何修改,且无法恢复!!!
# 确定后同步将在后台操作,完成时将刷新页面,请耐心等待。
# -*-coding:utf-8 -*-
# @Time: 2023/2/20 0020 下午 14:33
# @Author: Cwoner
# @Organization: CTIM
# @Software: PyCharm
import time
import requests
from lxml import etree
import hashlib
from setting import SAVEPATH
import os
import re
from tools.serverAPI import upload_file
from tools.give_an_alarm import send_email
class MinDongRiBao():
    """Spider for the 闽东日报 (Mindong Daily) digital newspaper.

    For a given publication date it fetches the front index page, extracts
    the PDF link of each newspaper section, saves every PDF under
    ``SAVEPATH/<mid>/<ccdate>/`` and pushes file + metadata to the server
    via ``upload_file``.
    """

    # Browser-like headers shared by the index and download requests
    # (previously duplicated in both methods).
    _HEADERS = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
    }

    def __init__(self, ccdate, id=''):
        """Prepare the spider for one publication date.

        :param ccdate: publication date as ``'YYYYMMDD'`` (e.g. '20230101').
        :param id: optional external identifier, kept for caller
            compatibility (NOTE: the name shadows the builtin ``id``).
        """
        self.url = f'http://szb.ndpic.cn/html/{ccdate[:4]}-{ccdate[4:6]}/{ccdate[6:]}/node_1.htm?v=1'
        self.ccdate = ccdate
        self.id = id
        self.name = '闽东日报'
        print('初始化:', self.name, self.url)
        # Stable per-newspaper id derived from the fixed name; doubles as the
        # storage directory name under SAVEPATH.
        self.mid = hashlib.md5((self.name + '_baozi').encode()).hexdigest()
        # makedirs(exist_ok=True) is race-free, unlike the original
        # isdir()-then-mkdir() pair.
        os.makedirs(SAVEPATH + self.mid, exist_ok=True)

    def index(self, c=0):
        """Fetch the index page and download every section's PDF.

        :param c: internal retry counter; the request is retried up to
            3 times before giving up silently.
        """
        try:
            response = requests.get(self.url, headers=self._HEADERS, verify=False, timeout=10)
        except requests.RequestException:
            # Was a bare `except:` — narrowed so Ctrl-C / programming errors
            # are no longer swallowed by the retry loop.
            c += 1
            if c > 2:
                # Alerting for the index page is deliberately disabled
                # (kept from the original source):
                # send_email(f'报纸采集系统异常-{self.name}-{int(time.time())}',f'主页请求器异常,异常次数超3次,采集退出。报纸:{self.name}. mid: {self.mid}. 报纸日期:{self.ccdate}.')
                return
            return self.index(c)
        if response.status_code != 200:
            return
        html = etree.HTML(response.content.decode('utf-8'))
        for i, ban_item in enumerate(html.xpath('.//a[@class="rigth_bmdh_href"]')):
            raw_title = re.sub(r'\s', '', ban_item.xpath('text()')[0])
            # Bug fix: the original prefixed str(i) BEFORE testing emptiness,
            # so the '未知标题' fallback was unreachable. Test the raw title.
            title = str(i) + raw_title if raw_title else f'未知标题{i}'
            print(self.name, title)
            href = ban_item.xpath('../../td[2]/a/@href')[0]
            tid = hashlib.md5((self.name + title + self.ccdate).encode()).hexdigest()
            if 'pdf' in href:
                # Hrefs are relative like '../../../attachment/...pdf'.
                pdf_url = ('http://szb.ndpic.cn/' + href).replace('../../..', '')
                print(pdf_url)
                self.__download(tid + '.pdf', pdf_url, title, tid)

    def __download(self, file, url, title, tid, c=0):
        """Download one section PDF, store it locally, and upload it.

        :param file: target file name (``<tid>.pdf``).
        :param url: absolute URL of the PDF.
        :param title: section title, forwarded as upload metadata.
        :param tid: md5 id of this section+date.
        :param c: internal retry counter; sends an alert email and stops
            after 3 consecutive failures.
        """
        try:
            response = requests.get(url, headers=self._HEADERS, verify=False, timeout=10)
        except requests.RequestException:
            # Was a bare `except:` — see index() for rationale.
            c += 1
            if c > 2:
                send_email(f'报纸采集系统异常-{self.name}-{int(time.time())}', f'下载报纸数据异常,异常次数超3次,现已终止该页面的采集。页面地址:{url}。 报纸:{self.name}. mid: {self.mid}. 报纸日期:{self.ccdate}.')
                return
            return self.__download(file, url, title, tid, c)
        if response.status_code != 200:
            return
        path = SAVEPATH + self.mid + '/' + self.ccdate + '/'
        os.makedirs(path, exist_ok=True)
        with open(path + file, 'wb') as f:
            f.write(response.content)
        upload_file({
            'entity_id': self.mid,
            'title': title,
            'tid': tid,
            'file_name': file,
            'origin_url': url,
            # 'YYYYMMDD' -> 'YYYY-MM-DD'
            'ndate': self.ccdate[:4] + '-' + self.ccdate[4:6] + '-' + self.ccdate[6:]
        }, response.content)

    def run(self):
        """Entry point: crawl the configured publication date."""
        self.index()
if __name__ == '__main__':
    # Ad-hoc manual run against a single fixed publication date.
    target_date = '20240428'
    MinDongRiBao(target_date).run()
# NOTE: The two lines below are Gitee moderation-notice residue accidentally
# captured into this file; commented out so the module remains valid Python.
# 此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
# 如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。