Developing a Simple Web Crawler in Python
Source code: http://download.csdn.NET/detail/hanchaobiao/9860671
I. What a crawler is and why crawler technology is valuable
1. What is a crawler?
A crawler is a program that automatically scrapes information from the internet: starting from one URL, it visits the URLs linked from it and extracts the data we need. In other words, a crawler is a program that automatically visits the internet and extracts data.
Entry point: http://baike.baidu.com/item/Python
URL format analysis (to avoid fetching useless paths): http://baike.baidu.com/item/{title}
Data to scrape: the title and summary of Python-related Baidu Baike entry pages
Inspecting the page shows the title element has class="lemmaWgt-lemmaTitle-title"
and the summary element has class="lemma-summary"
Page encoding: UTF-8
Since this is a targeted crawler, the code must be updated whenever the target site changes. If a run suddenly starts failing, Baidu Baike may have been updated, and the target needs to be re-analyzed (a quick check is sketched below).
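Before kicking off a full crawl, a small sanity check can confirm the markup has not changed. The snippet below is my own sketch, not part of the course code; it assumes the entry URL above is still reachable and still served as UTF-8:

from urllib import request
from bs4 import BeautifulSoup

# Fetch the entry page and confirm both target elements still exist
html = request.urlopen("http://baike.baidu.com/item/Python").read().decode("utf8")
soup = BeautifulSoup(html, "html.parser")
print(soup.find('dd', class_="lemmaWgt-lemmaTitle-title") is not None)  # title element present?
print(soup.find('div', class_="lemma-summary") is not None)             # summary element present?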
Code and comments:
Create spider_main.py
# Entry class of the crawler
from imooc.baike_spider import url_manager, html_downloader, html_output, html_parser

class spiderMain:
    # Constructor: initialize the collaborating components
    def __init__(self):
        # Instantiate the objects this class depends on
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownLoader()
        self.output = html_output.HtmlOutPut()
        self.parser = html_parser.HtmlParser()

    def craw(self, root_url):
        # Seed the URL manager with the root URL
        self.urls.add_new_url(root_url)
        count = 1
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                # Download the page
                html_context = self.downloader.downloade(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_context)
                print(new_urls)
                self.urls.add_new_urls(new_urls)
                self.output.collect_data(new_data)
                # Stop after crawling 1000 pages
                if count == 1000:
                    break
                count += 1
            except:
                print("craw failed")
        self.output.output_html()

# Main entry point
if __name__ == "__main__":
    root_url = "http://baike.baidu.com/item/Python"
    obj_spider = spiderMain()
    obj_spider.craw(root_url)
Create url_manager.py
class UrlManager:
    'URL manager class'
    # Constructor: initialize the two sets
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    # Add a single new URL to the manager
    def add_new_url(self, root_url):
        if root_url is None:
            return
        if root_url not in self.new_urls and root_url not in self.old_urls:
            # Neither pending nor already crawled, so this is a brand-new URL; add it to new_urls
            self.new_urls.add(root_url)

    # Add a batch of new URLs to the manager
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)  # reuse add_new_url()

    # Check whether any URLs are still waiting to be crawled
    def has_new_url(self):
        return len(self.new_urls) != 0

    # Take one URL to crawl and move it to the crawled set
    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
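The two sets are what keep the crawler from revisiting pages. A quick check of the deduplication behaviour (my own snippet, not part of the course code):

manager = UrlManager()
manager.add_new_url("http://baike.baidu.com/item/Python")
manager.add_new_url("http://baike.baidu.com/item/Python")  # duplicate: silently ignored
url = manager.get_new_url()   # pops it and records it in old_urls
manager.add_new_url(url)      # already crawled: ignored
print(manager.has_new_url())  # False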
Create html_downloader.py
from urllib import request
from urllib.parse import quote
import string

class HtmlDownLoader:
    'Downloads page content'
    def downloade(self, new_url):
        if new_url is None:
            return None
        # Percent-encode Chinese or other special characters in the request path
        url_ = quote(new_url, safe=string.printable)
        response = request.urlopen(url_)
        if response.getcode() != 200:
            return None  # request failed
        html = response.read()
        return html.decode("utf8")
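The quote(new_url, safe=string.printable) call leaves every printable ASCII character (including :, /, ? and =) untouched and percent-encodes only the non-ASCII bytes, which is exactly what a URL containing a Chinese title needs. A small illustration, using a hypothetical URL of the shape this crawler produces:

from urllib.parse import quote
import string

url = "http://baike.baidu.com/item/史记·2016?fr=navbar"
print(quote(url, safe=string.printable))
# http://baike.baidu.com/item/%E5%8F%B2%E8%AE%B0%C2%B72016?fr=navbar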
Create html_parser.py
from bs4 import BeautifulSoup
import re
from urllib import parse

class HtmlParser:
    # page_url: base URL against which relative links are resolved
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Match entry links such as /item/%E8%87%AA%E7%94%B1%E8%BD%AF%E4%BB%B6
        links = soup.find_all('a', href=re.compile(r'/item/\w+'))
        for link in links:
            new_url = link["href"]
            # e.g. page_url=http://baike.baidu.com/item/Python and new_url=/item/史记·2016?fr=navbar
            # then parse.urljoin(page_url, new_url) gives new_full_url = http://baike.baidu.com/item/史记·2016?fr=navbar
            new_full_url = parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        # Title element: <dd class="lemmaWgt-lemmaTitle-title"> ... Python ... </dd>
        red_data = {}
        red_data['url'] = page_url
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h2')  # extract the title text
        red_data['title'] = title_node.get_text()
        # Summary element: <div class="lemma-summary"> ... </div>
        summary_node = soup.find('div', class_="lemma-summary")
        red_data['summary'] = summary_node.get_text()
        return red_data

    # page_url: page address; html_context: page content
    def parse(self, page_url, html_context):
        if page_url is None or html_context is None:
            return
        # Python 3 strings are Unicode by default, so a from_encoding="utf-8" argument would be ignored; omit it
        soup = BeautifulSoup(html_context, "html.parser")
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
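The href filter and the urljoin call are the two pieces that keep the crawl inside Baidu Baike's entry pages. A standalone illustration (hypothetical values, mirroring the comment in _get_new_urls above; note that in Python 3, \w also matches Unicode word characters such as Chinese):

import re
from urllib import parse

pattern = re.compile(r'/item/\w+')
print(bool(pattern.search('/item/Python')))       # True
print(bool(pattern.search('/item/史记·2016')))     # True: \w matches Chinese characters
print(bool(pattern.search('/view/10812319.htm'))) # False: not an /item/ path

# Relative hrefs are resolved against the page they were found on
print(parse.urljoin('http://baike.baidu.com/item/Python', '/item/史记·2016?fr=navbar'))
# http://baike.baidu.com/item/史记·2016?fr=navbar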
Create html_output.py
class HtmlOutPut:
    def __init__(self):
        self.datas = []  # collected data records

    def collect_data(self, new_data):
        if new_data is None:
            return
        self.datas.append(new_data)

    def output_html(self):
        fout = open('output.html', 'w', encoding='utf8')  # open with utf8 to avoid garbled Chinese
        fout.write('<html>\n')
        fout.write('<body>\n')
        fout.write('<table>\n')
        for data in self.datas:
            fout.write('<tr>\n')
            fout.write('<td>%s</td>\n' % data['url'])
            fout.write('<td>%s</td>\n' % data['title'])
            fout.write('<td>%s</td>\n' % data['summary'])
            fout.write('</tr>\n')
        fout.write('</table>\n')
        fout.write('</body>\n')
        fout.write('</html>\n')
        fout.close()
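One caveat with writing raw %s values into table cells: a summary containing < or & would break the generated markup. A slightly hardened variant (my own sketch, not part of the original course code; SafeHtmlOutPut is a hypothetical name) escapes each field and lets a with block close the file:

import html

class SafeHtmlOutPut(HtmlOutPut):  # reuses collect_data from the class above
    def output_html(self):
        with open('output.html', 'w', encoding='utf8') as fout:
            fout.write('<html>\n<body>\n<table>\n')
            for data in self.datas:
                fout.write('<tr>\n')
                for key in ('url', 'title', 'summary'):
                    # Escape HTML-significant characters so the table stays well-formed
                    fout.write('<td>%s</td>\n' % html.escape(data[key]))
                fout.write('</tr>\n')
            fout.write('</table>\n</body>\n</html>\n')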
Video course: http://www.imooc.com/learn/563
Source code: http://download.csdn.Net/detail/hanchaobiao/9860671