mercredi 29 mai 2019

Python code in Feedly at terminal logcat understand

I'm getting started with reptiles and trying to perform a second crawling exercise based on Feedly(Feedly RSS is the first time i think): don't open website to see news(just title、source link and update time)

This is my code, the first tried with python.

And i was think that should i need crawling my subscribe root websites and write a Scrapy and use MangoDB or Excel/CSV to do basic exercise?

In my this case, i was even think that i should developing a macOS App to show crawling result and i was designed the App UI, or use Python QT and learned QT, perhaps i'm anxiety a lot but nobody to talk.

I tried print links and title first, but feedly should tap twice that show Source page,so i choose Feedly‘s entry.

Python code:

def get_url(url):  # 获得每一页新闻的原始链接

    news_url = requests.get('urls',headers)

    soup = BeautifulSoup(news_url.text, 'lxml')

    links = soup.select('#.*?\=_16b.*?\:2.*?\:d02fd57c_entry_title')

    for link in links:
        href = link.get('href')
     print(href)

#if __name__ == '__main__':
#    urls = ['https://feedly.com/i/entry/'.format(number) for number in range(1,16)]
#    for url in urls:
#        get_url(url)
#        time.sleep(2)

logcat:

 File "aa2.py", line 17
    print(href)
              ^
IndentationError: unindent does not match any outer indentation level

I think i don't need use Feedly API,Because i was use chrome to see json info that only have title and sub read, but i was not tried, so i choose web crawling.

I found Feedly "check"(Use Chrome)json only have title and unread, no "update time" and category".

I found Feedly jeson was only have title and unread, no "update time" and category"

Here is my all code:


import requests
import time
from lxml import etree
from bs4 import BeautifulSoup


# import pymongo MongoDB数据库
# import xlwt EXCEL
# import csv CSV

headers = {
           'cookie': '',  # 传入cookies
           'user-agent': 'Mozilla/5.0 (iPad; CPU OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko)' 
           'Version/11.0 Mobile/15A5341f Safari/604.1'
}

def get_url(url):  # 获得每一页新闻的原始链接

    news_url = requests.get(url, headers)

    soup = BeautifulSoup(news_url.text, 'lxml')

    links = soup.select('#.*?\=_16b.*?\:2.*?\:d02fd57c_entry_title')

    for link in links:
        href = link.get('href')
        get_newsinfo(href)

    get_url(url)

def get_newsinfo(url):

    news_title = requests.get(url, headers)
    soup = BeautifulSoup(news_title.text,'lxml')

    news_titles = soup.select('#.*?\=_16b.*?\:2.*?\:d02fd57c_entry_title')

    news_links = soup.select('#.*?\=_16b.*?\:2.*?\:d02fd57c_entry_title')

    for link in  news_links:
        href = link.get('href')
        return href

    news_updatetimes = soup.select('#>*?\=_16b03.*?\:2.?*\:5de7e37_entryHolder > div.u100Entry > div.entryHeader > div.fx.metadata > span:nth-child(3)')

    for news_title, news_link, news_updatetime in zip(news_titles, news_links, news_updatetimes):
        data = {
            'NewsTitle':news_title.get_text(),
            'Updatetime':news_updatetime.get_text(),
            'NewsLink':news_link.get.text()
        }
        print(data)

    get_newsinfo(url)

if __name__ == '__main__':

    urls = ['https://feedly.com/i/entry/'.format(number) for number in range(1, 14)]

    for url in urls:
       get_url(url)
    time.sleep(2)

#    wbdata = []

#    newsList = ['ifanr', 'huxiu', '36kr']
#
#    if newsList[0] = wbdata:

 #       wbdata = requests.get('http://ifanr.com',headers)

# def news_title(url):

#    news_info = requests.get('https://feedly.com/v3/streams/contents?streamId=user%2F670ed6b0-dfb0-46c7-b688-8b33ba46dd8c%2Fcategory%2F%E7%A7%91%E6%8A%80&count=20&unreadOnly=true&ranked=newest&similar=true&continuation=16b01e292f7:3277d:5de7e37&ck=1559135018763&ct=feedly.desktop&cv=31.0.357', headers)

#    d = news_info.json()

#    artititles = d['title']
#    print(len(artititles))

#    for artititle in artititles:
#        yield artititle['title']


I hope news_title news_updatetime news_links.

Here is my logcat:
```bash
FelixdeMacBook-Pro:Desktop felixxiong$ python3 feedlynewsupdate.py
Traceback (most recent call last):
  File "feedlynewsupdate.py", line 61, in <module>
    get_url(url)
  File "feedlynewsupdate.py", line 23, in get_url
    links = soup.select('#.*?\=_16b.*?\:2.*?\:d02fd57c_entry_title')
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/bs4/element.py", line 1376, in select
    return soupsieve.select(selector, self, namespaces, limit, **kwargs)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/soupsieve/__init__.py", line 114, in select
    return compile(select, namespaces, flags, **kwargs).select(tag, limit)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/soupsieve/__init__.py", line 63, in compile
    return cp._cached_css_compile(pattern, namespaces, custom, flags)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/soupsieve/css_parser.py", line 214, in _cached_css_compile
    CSSParser(pattern, custom=custom_selectors, flags=flags).process_selectors(),
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/soupsieve/css_parser.py", line 1113, in process_selectors
    return self.parse_selectors(self.selector_iter(self.pattern), index, flags)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/soupsieve/css_parser.py", line 946, in parse_selectors
    key, m = next(iselector)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/soupsieve/css_parser.py", line 1100, in selector_iter
    raise SelectorSyntaxError(msg, self.pattern, index)
soupsieve.util.SelectorSyntaxError: Malformed id selector at position 0
  line 1:
#.*?\=_16b.*?\:2.*?\:d02fd57c_entry_title
^

Thank you!




Aucun commentaire:

Enregistrer un commentaire