Scraping and downloading Baidu Wenku documents with Python (free articles only)


import requests
import re
import json
import os

session = requests.session()


def fetch_url(url):
    # Wenku pages are served as GBK-encoded HTML
    return session.get(url).content.decode('gbk')


def get_doc_id(url):
    # e.g. https://wenku.baidu.com/view/<doc_id>.html -> <doc_id>
    return re.findall('view/(.*).html', url)[0]


def parse_type(content):
    # The document type ('doc', 'txt', 'ppt', ...) is embedded in the page's JS config
    return re.findall(r"docType.*?:.*?'(.*?)',", content)[0]


def parse_title(content):
    return re.findall(r"title.*?:.*?'(.*?)',", content)[0]


def parse_doc(content):
    # 'doc' pages: the text is spread over several JSON files whose URLs sit in the page source
    result = ''
    url_list = re.findall('(https.*?0.json.*?)\\\\x22}', content)
    url_list = [addr.replace("\\\\\\/", "/") for addr in url_list]
    for url in url_list[:-5]:
        content = fetch_url(url)
        y = 0
        txtlists = re.findall('"c":"(.*?)".*?"y":(.*?),', content)
        for item in txtlists:
            if not y == item[1]:
                y = item[1]
                n = '\n'
            else:
                n = ''
            result += n
            result += item[0].encode('utf-8').decode('unicode_escape', 'ignore')
    return result


def parse_txt(doc_id):
    content_url = 'https://wenku.baidu.com/api/doc/getdocinfo?callback=cb&doc_id=' + doc_id
    content = fetch_url(content_url)
    md5 = re.findall('"md5sum":"(.*?)"', content)[0]
    pn = re.findall('"totalPageNum":"(.*?)"', content)[0]
    rsign = re.findall('"rsign":"(.*?)"', content)[0]
    content_url = 'https://wkretype.bdimg.com/retype/text/' + doc_id + '?rn=' + pn + '&type=txt' + md5 + '&rsign=' + rsign
    content = json.loads(fetch_url(content_url))
    result = ''
    for item in content:
        for i in item['parags']:
            result += i['c'].replace('\\r', '\r').replace('\\n', '\n')
    return result


def parse_other(doc_id):
    # ppt / pdf and other types can only be fetched as per-page images
    content_url = "https://wenku.baidu.com/browse/getbcsurl?doc_id=" + doc_id + "&pn=1&rn=99999&type=ppt"
    content = fetch_url(content_url)
    url_list = re.findall('{"zoom":"(.*?)","page"', content)
    url_list = [item.replace("\\", '') for item in url_list]
    if not os.path.exists(doc_id):
        os.mkdir(doc_id)
    for index, url in enumerate(url_list):
        content = session.get(url).content
        path = os.path.join(doc_id, str(index) + '.jpg')
        with open(path, 'wb') as f:
            f.write(content)
    print('Images saved to the ' + doc_id + ' folder')


def save_file(filename, content):
    with open(filename, 'w', encoding='utf8') as f:
        f.write(content)
        print('Saved as: ' + filename)


# test_txt_url = 'https://wenku.baidu.com/view/cbb4af8b783e0912a3162a89.html'


def main():
    url = input('Enter the URL of the Wenku document to download: ')
    content = fetch_url(url)
    doc_id = get_doc_id(url)
    doc_type = parse_type(content)
    title = parse_title(content)
    if doc_type == 'doc':
        result = parse_doc(content)
        save_file(title + '.txt', result)
    elif doc_type == 'txt':
        result = parse_txt(doc_id)
        save_file(title + '.txt', result)
    else:
        parse_other(doc_id)


if __name__ == "__main__":
    main()
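If you would rather drive it from another script than through the interactive prompt, the functions can be called directly. The snippet below is only a minimal sketch: it assumes the code above has been saved as wenku_spider.py (a made-up file name), reuses the test URL from the comment above, and only works while that document is still freely viewable.

# Hypothetical non-interactive usage, assuming the script above is saved as wenku_spider.py
import wenku_spider

url = 'https://wenku.baidu.com/view/cbb4af8b783e0912a3162a89.html'
content = wenku_spider.fetch_url(url)
doc_id = wenku_spider.get_doc_id(url)
doc_type = wenku_spider.parse_type(content)
title = wenku_spider.parse_title(content)

if doc_type == 'doc':
    wenku_spider.save_file(title + '.txt', wenku_spider.parse_doc(content))
elif doc_type == 'txt':
    wenku_spider.save_file(title + '.txt', wenku_spider.parse_txt(doc_id))
else:
    wenku_spider.parse_other(doc_id)  # ppt/pdf and friends are saved as images under ./<doc_id>/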

Crawl result


That's all for scraping and downloading Baidu Wenku documents with Python. For more on scraping Baidu Wenku with Python, please see the other related articles!
