本文主要是介绍Python2.7 -- 爬虫之百度贴吧,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
源码参考:https://cuiqingcai.com/993.html
使用python,写爬虫,知识含量满满
涉及:
- 调用URL模块
- 正则表达式模块
- 文件流读写模块
代码如下,粘贴出来后可以直接运行:
# -*- coding:utf-8 -*-
__author__ = 'GP'
import urllib
import urllib2
import re#处理页面标签类
class Tool:
    """Strip/convert HTML markup in a Tieba post body, leaving readable plain text."""

    # Remove <img ...> tags and 7-space runs.
    # FIX: the original pattern was '<img.*?>| {7}|' -- the trailing '|' is an
    # empty alternative that matches the empty string at every position
    # (needless empty-match substitutions; modern `re` warns on it).
    # Removing it leaves the output unchanged.
    removeImg = re.compile('<img.*?>| {7}')
    # Remove anchor open/close tags.
    removeAddr = re.compile('<a.*?>|</a>')
    # Tags converted to a newline.
    replaceLine = re.compile('<tr>|<div>|</div>|</p>')
    # Table cell <td> converted to a tab.
    replaceTD = re.compile('<td>')
    # Paragraph start converted to newline + leading space.
    replacePara = re.compile('<p.*?>')
    # Line breaks converted to a newline.
    replaceBR = re.compile('<br><br>|<br>')
    # Any remaining tag is dropped entirely.
    removeExtraTag = re.compile('<.*?>')

    def replace(self, x):
        """Return *x* with tags removed/converted and surrounding whitespace stripped.

        The substitution order matters: specific tags are handled first, then
        removeExtraTag sweeps up whatever markup is left.
        """
        x = re.sub(self.removeImg, "", x)
        x = re.sub(self.removeAddr, "", x)
        x = re.sub(self.replaceLine, "\n", x)
        x = re.sub(self.replaceTD, "\t", x)
        x = re.sub(self.replacePara, "\n ", x)
        x = re.sub(self.replaceBR, "\n", x)
        x = re.sub(self.removeExtraTag, "", x)
        # strip() drops leading/trailing whitespace introduced by the substitutions.
        return x.strip()
class BDTB:#初始化,传入基地址,是否只看楼主的参数def __init__(self,baseUrl,seeLZ,floorTag):#base链接地址self.baseURL = baseUrl#是否只看楼主self.seeLZ = '?see_lz='+str(seeLZ)#HTML标签剔除工具类对象self.tool = Tool()#全局file变量,文件写入操作对象self.file = None#楼层标号,初始为1self.floor = 1#默认的标题,如果没有成功获取到标题的话则会用这个标题self.defaultTitle = u"百度贴吧"#是否写入楼分隔符的标记self.floorTag = floorTag#传入页码,获取该页帖子的代码def getPage(self,pageNum):try:#构建URLurl = self.baseURL+ self.seeLZ + '&pn=' + str(pageNum)request = urllib2.Request(url)response = urllib2.urlopen(request)#返回UTF-8格式编码内容return response.read().decode('utf-8')#无法连接,报错except urllib2.URLError, e:if hasattr(e,"reason"):print u"连接百度贴吧失败,错误原因",e.reasonreturn None#获取帖子标题def getTitle(self,page):#得到标题的正则表达式pattern = re.compile('<h1 class="core_title_txt.*?>(.*?)</h1>',re.S)result = re.search(pattern,page)if result:#如果存在,则返回标题return result.group(1).strip()else:return None#获取帖子一共有多少页def getPageNum(self,page):#获取帖子页数的正则表达式pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>',re.S)result = re.search(pattern,page)if result:return result.group(1).strip()else:return None#获取每一层楼的内容,传入页面内容def getContent(self,page):#匹配所有楼层的内容pattern = re.compile('<div id="post_content_.*?>(.*?)</div>',re.S)items = re.findall(pattern,page)contents = []for item in items:#将文本进行去除标签处理,同时在前后加入换行符content = "\n"+self.tool.replace(item)+"\n"contents.append(content.encode('utf-8'))return contentsdef setFileTitle(self,title):#如果标题不是为None,即成功获取到标题if title is not None:self.file = open(title + ".txt","w+")else:self.file = open(self.defaultTitle + ".txt","w+")def writeData(self,contents):#向文件写入每一楼的信息for item in contents:if self.floorTag == '1':#楼之间的分隔符floorLine = "\n" + str(self.floor) + u"-----------------------------------------------------------------------------------------\n"self.file.write(floorLine)self.file.write(item)self.floor += 1def start(self):indexPage = self.getPage(1)pageNum = self.getPageNum(indexPage)title = self.getTitle(indexPage)self.setFileTitle(title)if pageNum == None:print 
"URL已失效,请重试"returntry:print "该帖子共有" + str(pageNum) + "页"for i in range(1,int(pageNum)+1):print "正在写入第" + str(i) + "页数据"page = self.getPage(i)contents = self.getContent(page)self.writeData(contents)#出现写入异常except IOError,e:print "写入异常,原因" + e.messagefinally:print "写入任务完成"print u"请输入帖子代号"
# Script driver: prompt for the thread id and crawl options, then start the crawl.
baseURL = 'http://tieba.baidu.com/p/' + str(raw_input(u'http://tieba.baidu.com/p/'))
# FIX: the original prompt string was garbled ("是否输入否只获取楼主发言,是输入1,0"
# -- duplicated "输入否" and missing the "否输入0" half of the instruction).
seeLZ = raw_input("是否只获取楼主发言,是输入1,否输入0\n")
floorTag = raw_input("是否写入楼层信息,是输入1,否输入0\n")
bdtb = BDTB(baseURL, seeLZ, floorTag)
bdtb.start()
这篇关于Python2.7 -- 爬虫之百度贴吧的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!