Python爬取网站小说保存txt,pdf文件

论坛 期权论坛 编程之家     
选择匿名的用户   2021-5-24 07:13   11   0
# 爬取小说 http://www.hengyan.com/dir/9495.aspx

from lxml.html import etree
import requests
import re
import os
import pdfkit

# --- Configuration -----------------------------------------------------------
# Base site URL and the novel's table-of-contents page.
host = "http://www.hengyan.com"
url = "http://www.hengyan.com/dir/9495.aspx"
# Accumulates "chapter title + chapter body" HTML for the final PDF.
temp = ''
# Directory that receives one .txt file per chapter.
outPutDirName = 'D:/Book/'
# pdfkit requires an explicit path to a local wkhtmltopdf binary on Windows.
config = pdfkit.configuration(wkhtmltopdf=r"D:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe")

# Create the output directory if it does not exist yet.
# exist_ok=True replaces the racy exists()-then-makedirs() check.
os.makedirs(outPutDirName, exist_ok=True)

# Fetch the table-of-contents page and parse it into an lxml tree.
response = requests.get(url, timeout=30)
selector = etree.HTML(response.text)

# Matches every character that is NOT a CJK ideograph, an ASCII letter/digit,
# '-', or an ideographic space; used to strip characters that are illegal in
# filenames. Compiled once instead of on every loop iteration.
illegal_chars = re.compile(u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a-\u3000])")

for i in range(1, 43):  # chapter links live in li[1]..li[42]
    words = selector.xpath('//*[@id="left"]/div[3]/ul/li[' + str(i) + ']/a/text()')
    hrefs = selector.xpath('//*[@id="left"]/div[3]/ul/li[' + str(i) + ']/a/@href')
    address = host + ''.join(hrefs)
    print(words, address)

    # Download the chapter page and locate its text container.
    html = requests.get(address, timeout=30)
    content = etree.HTML(html.text)
    block = content.xpath('//*[@class="contentitem"]/div[3]')
    sub_str = illegal_chars.sub("", str(''.join(words)))  # sanitized chapter title
    filename = outPutDirName + sub_str + '.txt'

    chapter_text = ''  # whole chapter body; stays '' when no content div matched
    # BUG FIX: the original re-opened the file in 'w' mode inside the inner
    # loop (each write overwrote the previous one), closed a possibly-unbound
    # handle, and discarded the results of str.strip()/str.replace() (strings
    # are immutable). Open the file once per chapter with an explicit encoding
    # and accumulate the cleaned text.
    with open(filename, 'w', encoding='utf-8') as file:
        for j in block:
            paragraphs = j.xpath('./p/text()')
            text = '\n'.join(paragraphs).strip().replace(' ', '')
            file.write(text)
            chapter_text += text

    # Append "title<br/>body<br/>" to the running HTML buffer, converting
    # newlines to HTML line breaks (the original called str.replace but
    # dropped the return value, and used the invalid tag '</br>').
    temp = temp + '\n'.join(words) + '<br/>' + chapter_text.replace('\n', '<br/>') + '<br/>'

# Render the accumulated chapter HTML to a single PDF.
last_pdf = '<html><head><meta charset="UTF-8"></head>' \
       '<body><div align="center"><p>%s</p></div></body></html>' % temp

pdfkit.from_string(last_pdf, "./民间烧尸怪谈.pdf", configuration=config)


分享到 :
0 人收藏
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

积分:3875789
帖子:775174
精华:0
期权论坛 期权论坛
发布
内容

下载期权论坛手机APP