|
想爬取小木虫首页那些板块的名字(似乎是个很简单的项目....)
写了如下代码
为何既没有我create的文件,又没有爬取结果
请大侠们给出修改意见==
谢谢!!
import requests
from requests.excepti** import RequestException
import re
import json
url = 'http://muchong.com/bbs/post.php?action=newthread'
def getpage(url):
try:
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
resp**e = requests.get(url,headers=headers)
if resp**e.status_code == 200:
return resp**e.text
return None
except RequestException:
return None
def parse(html):
html = getpage(url)
pattern = re.compile('<td width="25%">.*?xmc_blue xmc_ft14.*?target="_blank">(.*?)</a></h4>',re.S)
items = re.findall(pattern,html)
write_to_file(items)
print(items)
def write_to_file(content):
with open('xiaomuchong.txt', 'a', encoding='utf-8') as f:#a代表追加
f.write(json.dumps(content, ensure_ascii=False) + '\n')#.想输出真正的中文需要指定ensure_ascii=False
if __name__ == '__main__':
main()
谢谢,初学python,希望大家指点!! |
|
|