https://blog.csdn.net/minge89/article/details/102978507
Download
Baidu Cloud: https://pan.baidu.com/s/1pCqptL6QwnP2eUeyAABnYA (extraction code: sxca)
Requirements: the packaged exe runs on 64-bit Windows 7.
Note: the completeness of formatting, content, and images is not guaranteed.

The main Python source:

```python
# -*- coding: UTF-8 -*-
# WeChat article page scraper
import requests
import re, time, os
from bs4 import BeautifulSoup
from baocun import bctp, bcwb   # local storage module, listed below
from fake_useragent import UserAgent


def ua():
    """Return request headers with a randomized User-Agent."""
    ua = UserAgent()
    headers = {"User-Agent": ua.random}
    return headers


def get_content(url):
    headers = ua()
    response = requests.get(url, headers=headers).text
    soup = BeautifulSoup(response, 'lxml')
    # Get the title
    h2 = soup.find('h2', class_="rich_media_title").get_text()
    h2 = h2.replace('\n', '').replace(' ', '')
    h2 = re.sub(r'[\|\/\<\>\:\*\?\\\"]', "_", h2)  # strip characters illegal in file names
    print(f'Article title: {h2}')
    os.makedirs(f'weixin/{h2}/', exist_ok=True)
    lj = f'weixin/{h2}/'            # directory for this article
    ljj = f'weixin/{h2}/{h2}.txt'   # text file path
    # Get the account (author) name
    author = soup.find('div', class_="rich_media_meta_list").find('a', id="js_name").get_text()
    author = author.replace('\n', '').replace(' ', '')
    author = f'Source: {author}'
    print(author)
    i = 1
    text = ''
    ps = soup.find('div', class_="rich_media_content").find_all('p')
    for p in ps:
        if "img" in str(p):
            try:
                img_url = p.find('img')['data-src']
                print(img_url)
                # WeChat CDN URLs usually end in wx_fmt=jpeg / wx_fmt=png etc.
                if "jpeg" == img_url[-4:]:
                    img_name = f'{i}.{img_url[-4:]}'
                else:
                    img_name = f'{i}.{img_url[-3:]}'
                bctp(lj, img_url, img_name)
                p_content = img_name   # reference the saved image in the text
                i = i + 1
            except Exception as e:
                print(f"Failed to fetch image data, error: {e}")
                p_content = ''
        else:
            p_content = p.get_text()
        text = '%s%s%s' % (text, '\n', p_content)
    texts = '%s%s%s%s%s' % (h2, '\n', author, '\n', text)
    print(texts)
    bcwb(ljj, texts)


if __name__ == '__main__':
    url = input("Enter the WeChat article URL to scrape: ")
    print('Spider starting, please wait...')
    get_content(url)
    print('Done; the program will close in 5 s.')
    time.sleep(5)
```
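The extension guessing above (`img_url[-4:]` / `img_url[-3:]`) works because WeChat CDN image URLs typically end in a `wx_fmt=jpeg`-style query parameter, but it yields odd file names for anything else. A more robust sketch that parses the query string explicitly (`guess_ext` is a hypothetical helper of mine, not part of the original source):

```python
from urllib.parse import urlparse, parse_qs


def guess_ext(img_url, default='jpg'):
    """Guess an image extension from a WeChat CDN URL.

    These URLs usually carry the format in a wx_fmt query parameter
    (e.g. ...?wx_fmt=jpeg); fall back to the path suffix, then `default`.
    """
    parts = urlparse(img_url)
    fmt = parse_qs(parts.query).get('wx_fmt', [None])[0]
    if fmt:
        return fmt
    suffix = parts.path.rsplit('.', 1)
    if len(suffix) == 2 and 0 < len(suffix[1]) <= 4:
        return suffix[1]
    return default
```

With such a helper, the branching on `img_url[-4:]` would collapse to `img_name = f'{i}.{guess_ext(img_url)}'`.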
An improved version that grabs the complete article content in one pass, without per-paragraph formatting:

```python
# WeChat article: full-content scraping
def cs(url):
    headers = ua()
    response = requests.get(url, headers=headers).text
    soup = BeautifulSoup(response, 'lxml')
    get_article(soup)


def get_article(soup):
    # Get the title
    h2 = soup.find('h2', class_="rich_media_title").get_text()
    h2 = h2.replace('\n', '').replace(' ', '')
    h2 = re.sub(r'[\|\/\<\>\:\*\?\\\"]', "_", h2)  # strip characters illegal in file names
    print(f'Article title: {h2}')
    os.makedirs(f'weixin/{h2}/', exist_ok=True)
    lj = f'weixin/{h2}/'
    ljj = f'weixin/{h2}/{h2}.txt'
    author = soup.find('div', class_="rich_media_meta_list").find('a', id="js_name").get_text()
    author = author.replace('\n', '').replace(' ', '')
    author = f'Source: {author}'
    print(author)
    # Get the text content in one go
    texts = soup.find('div', class_="rich_media_content").get_text()
    texts = '\n'.join(texts.split('。'))  # break into lines at the full-width period
    texts = '%s%s%s%s%s' % (h2, '\n', author, '\n', texts)
    print(texts)
    bcwb(ljj, texts)
    # Download all images
    i = 1
    imgs = soup.find('div', class_="rich_media_content").find_all('img')
    for img in imgs:
        img_url = img['data-src']
        print(img_url)
        if "jpeg" == img_url[-4:]:
            img_name = f'{i}.{img_url[-4:]}'
        else:
            img_name = f'{i}.{img_url[-3:]}'
        bctp(lj, img_url, img_name)
        i = i + 1
```
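The improved version reuses `ua()`, `bctp`, and `bcwb` from above, so it can be driven the same way as `get_content`; a hypothetical entry point (not in the original source) would be:

```python
if __name__ == '__main__':
    url = input("Enter the WeChat article URL to scrape: ")
    cs(url)   # writes weixin/<title>/<title>.txt plus numbered images
```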
The storage module (baocun) used by the source above:

```python
# baocun.py: storage helpers
import requests
import time
from fake_useragent import UserAgent


def ua():
    """Return request headers with a randomized User-Agent."""
    ua = UserAgent()
    headers = {"User-Agent": ua.random}
    return headers


# Download an image
def bctp(lj, img_url, img_name):
    print("Starting image download!")
    try:
        r = requests.get(img_url, headers=ua(), timeout=5)
        with open(f'{lj}/{img_name}', 'wb') as f:
            f.write(r.content)
        print(f'Downloaded image {img_name}!')
        time.sleep(1)
    except Exception as e:
        if "port=443): Read timed out" in str(e):
            # Retry once after a read timeout
            time.sleep(2)
            try:
                r = requests.get(img_url, headers=ua(), timeout=5)
                with open(f'{lj}/{img_name}', 'wb') as f:
                    f.write(r.content)
                print(f'Downloaded image {img_name}!')
            except Exception as e:
                print(f'Failed to download image {img_name}!')
                print(f'Error: {e}')
                with open(f'{lj}/spider.txt', 'a+', encoding='utf-8') as f:
                    f.write(f'Error: {e} --- failed to download image {img_url}\n')
        else:
            print(f'Failed to download image {img_name}!')
            print(f'Error: {e}')
            with open(f'{lj}/spider.txt', 'a+', encoding='utf-8') as f:
                f.write(f'Error: {e} --- failed to download image {img_url}\n')


# Save text content
def bcwb(ljj, texts):
    print("Saving text")
    with open(ljj, 'w', encoding='utf-8') as f:
        f.write(texts)
    print('Saved text content!')
```
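`bctp` duplicates the whole download-and-save path just to retry once after a read timeout. A sketch of the same behavior with the retry folded into a loop (`bctp_loop` and its `retries` parameter are my additions; unlike the original, this version retries on any exception, not only timeouts):

```python
def bctp_loop(lj, img_url, img_name, retries=2):
    """Sketch: bctp with the duplicated retry branch factored into a loop."""
    last_err = None
    for attempt in range(retries):
        try:
            r = requests.get(img_url, headers=ua(), timeout=5)
            with open(f'{lj}/{img_name}', 'wb') as f:
                f.write(r.content)
            print(f'Downloaded image {img_name}!')
            return
        except Exception as e:
            last_err = e
            time.sleep(2)  # brief pause before the next attempt
    print(f'Failed to download image {img_name}! Error: {last_err}')
    with open(f'{lj}/spider.txt', 'a+', encoding='utf-8') as f:
        f.write(f'Error: {last_err} --- failed to download image {img_url}\n')
```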