Scraping Blog Rankings
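
The script below downloads the weblog ranking page from Chinaz Alexa, pulls each site's name and description out of the HTML with regular expressions, and prints the entries in rank order.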


'''
A small crawler example: scrape the Chinaz Alexa weblog ranking list.
'''

import re
from urllib import request

class Spider:
    '''
    A small crawler class.
    '''
    # Ranking page for weblogs on Chinaz Alexa.
    url = 'https://alexa.chinaz.com/Category/index_Arts_Weblogs.html'
    # Each ranked entry sits inside a <div class="righttxt"> block;
    # the name and description patterns are applied within that block.
    root_pattern = r'<div class="righttxt">([\s\S]*?)</div>'
    name_pattern = r'</a><span>([\s\S]*?)</span>'
    desc_pattern = r'<p class="">([\s\S]*?)</p>'

    def __fetch_content(self):
        '''
        Fetch the HTML of the ranking page.
        '''
        r = request.urlopen(Spider.url)
        htmls = r.read()
        # Decode the raw bytes; the page is served as UTF-8.
        htmls = str(htmls, encoding='utf-8')
        return htmls

    def __analysis(self, htmls):
        '''
        Parse the HTML: collect the name and description of each entry.
        '''
        root_html = re.findall(Spider.root_pattern, htmls)

        anchors = []
        for html in root_html:
            name = re.findall(Spider.name_pattern, html)
            desc = re.findall(Spider.desc_pattern, html)
            anchor = {'name': name, 'desc': desc}
            anchors.append(anchor)

        return anchors

    def __refine(self, anchors):
        # Keep only the first match for each field and strip stray whitespace from the name.
        l = lambda anchor: {
            'name': anchor['name'][0].strip(),
            'desc': anchor['desc'][0]
        }
        return map(l, anchors)

    def __show(self, anchors):
        # Print one line per entry: rank, name, description.
        for rank, anchor in enumerate(anchors, start=1):
            print('rank   ' + str(rank)
                  + ':    ' + anchor['name']
                  + '   ' + anchor['desc']
                  )

    def go(self):
        # Pipeline: fetch the page, parse it, refine the matches, then print the ranking.
        htmls = self.__fetch_content()
        anchors = self.__analysis(htmls)
        anchors = list(self.__refine(anchors))
        self.__show(anchors)

if __name__ == '__main__':
    spider = Spider()
    spider.go()
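
When run, the script prints one ranked line per extracted entry (rank number, site name, description).

For reference, here is how the three patterns cooperate on a single entry. This is a standalone sketch: the HTML snippet is made up for illustration and the real page markup may differ.

import re

root_pattern = r'<div class="righttxt">([\s\S]*?)</div>'
name_pattern = r'</a><span>([\s\S]*?)</span>'
desc_pattern = r'<p class="">([\s\S]*?)</p>'

# A made-up entry in the same shape the patterns expect.
sample = ('<div class="righttxt"><a></a><span> Example Blog </span>'
          '<p class="">A sample description</p></div>')

block = re.findall(root_pattern, sample)[0]
print(re.findall(name_pattern, block))  # [' Example Blog ']
print(re.findall(desc_pattern, block))  # ['A sample description']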