Mathias : Différence entre versions
Ligne 1 : | Ligne 1 : | ||
<syntaxhighlight lang="python"> | <syntaxhighlight lang="python"> | ||
+ | |||
#la librairie qui permet de se connecter au site | #la librairie qui permet de se connecter au site | ||
import mechanize | import mechanize | ||
Ligne 8 : | Ligne 9 : | ||
import ssl | import ssl | ||
− | + | import json | |
Ligne 49 : | Ligne 50 : | ||
#print(resultats) | #print(resultats) | ||
+ | def extractText(textTag): | ||
+ | #print(textTag.text_content()) | ||
+ | elements = textTag.xpath(".//h2/span[@class='mw-headline']|.//div[@class='mw-parser-output']/p") | ||
+ | article = '' | ||
+ | for element in elements: | ||
+ | article = article + element.text_content() + '\n' | ||
− | + | return article | |
Ligne 87 : | Ligne 94 : | ||
nextUrl = siteUrl+nextPrevLinkTag.get('href') | nextUrl = siteUrl+nextPrevLinkTag.get('href') | ||
break | break | ||
+ | |||
+ | break | ||
print(articlesLinks) | print(articlesLinks) | ||
+ | |||
+ | #[ | ||
+ | # { | ||
+ | # 'title':'', | ||
+ | # 'text':''} | ||
+ | # , | ||
+ | # { | ||
+ | # 'title':'', | ||
+ | # 'text':''} | ||
+ | # ] | ||
+ | |||
+ | articles = [] | ||
+ | counter = 0 | ||
+ | for articleLink in articlesLinks: | ||
+ | article = {'title':'', 'text':''} | ||
+ | |||
+ | src = getSrc(articleLink) | ||
+ | |||
+ | titleTags = getHtmlElements(src, 'h1#firstHeading') | ||
+ | |||
+ | try: | ||
+ | article['title'] = titleTags[0].text_content() | ||
+ | except: | ||
+ | print("erreur") | ||
+ | |||
+ | |||
+ | textTag = getHtmlElements(src, '.mw-parser-output') | ||
+ | |||
+ | article['text'] = extractText(textTag[0]) | ||
+ | |||
+ | articles.append(article) | ||
+ | |||
+ | counter += 1 | ||
+ | if(counter == 4): | ||
+ | break | ||
+ | |||
+ | |||
+ | print(articles) | ||
+ | |||
+ | articlesJSON = json.dumps(articles) | ||
+ | |||
+ | with open('output.json', 'w') as f: | ||
+ | f.write(articlesJSON) | ||
+ | |||
</syntaxhighlight > | </syntaxhighlight > |
Version du 16 décembre 2022 à 14:19
#la librairie qui permet de se connecter au site
import mechanize
#la librairie qui permet d'analyser la structure html
import lxml.html
#la librairie qui permet de sélectionner une info dans la structure html
import cssselect
import ssl
import json
def getSrc(url):
    """Download the page at *url* and return it as a parsed lxml HTML tree."""
    # 1. download the html source of the page
    browser = mechanize.Browser()
    # pretend to be a regular desktop browser; some sites reject the default UA
    browser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    # ignore robots.txt restrictions
    browser.set_handle_robots(False)
    # https workaround: disable certificate verification
    browser.set_ca_data(context=ssl._create_unverified_context(cert_reqs=ssl.CERT_NONE))
    data = browser.open(url, timeout=10)
    # 2. parse the source into a navigable html structure
    source = data.read()
    html = lxml.html.fromstring(source)
    return html
def getHtmlElements(html, selecteurCss):
    """Return the elements of *html* that match the CSS selector string *selecteurCss*."""
    # 3. target the information we are interested in:
    # parse the css selector and keep the first parsed selector object
    parsed_selectors = cssselect.parse(selecteurCss)
    first_selector = parsed_selectors[0]
    # translate it into an equivalent xpath expression
    xpath_expr = cssselect.HTMLTranslator().selector_to_xpath(first_selector)
    # run the xpath query against the parsed document
    return html.xpath(xpath_expr)
def extractText(textTag):
    """Concatenate the section headlines and paragraphs found under *textTag*.

    Each matched node's text content becomes one newline-terminated chunk.
    """
    # every h2 headline span plus every paragraph of a nested parser-output div
    nodes = textTag.xpath(".//h2/span[@class='mw-headline']|.//div[@class='mw-parser-output']/p")
    # join the text of each node, one chunk per line
    return ''.join(node.text_content() + '\n' for node in nodes)
siteUrl = 'https://www.lostmediawiki.com'

# Crawl the paginated category listing and collect the absolute URL of
# every article it links to.
articlesLinks = []
nextUrl = 'https://www.lostmediawiki.com/index.php?title=Category:Completely_lost_media'
while nextUrl:
    print('######NEXT PAGE!############')
    print('#GOING TO'+nextUrl)
    src = getSrc(nextUrl)
    # stays False (and ends the loop) unless a "next page" link is found below
    nextUrl = False
    # every article link on this category page
    for articleLinkTag in getHtmlElements(src, ".mw-category-group a"):
        articlesLinks.append(siteUrl + articleLinkTag.get('href'))
    # look for the pagination link leading to the next listing page
    for nextPrevLinkTag in getHtmlElements(src, '#mw-pages > a'):
        print(nextPrevLinkTag.text_content())
        if nextPrevLinkTag.text_content() == 'next page':
            nextUrl = siteUrl + nextPrevLinkTag.get('href')
            break
    # NOTE: deliberate debug limit — stop after the first listing page
    break
print(articlesLinks)
#[
# {
# 'title':'',
# 'text':''}
# ,
# {
# 'title':'',
# 'text':''}
# ]
# Scrape each article page into a {'title': ..., 'text': ...} dict.
articles = []
counter = 0
for articleLink in articlesLinks:
    article = {'title': '', 'text': ''}
    src = getSrc(articleLink)
    # the article title lives in the h1 page heading
    titleTags = getHtmlElements(src, 'h1#firstHeading')
    try:
        article['title'] = titleTags[0].text_content()
    except IndexError:
        # page without the expected heading: keep the empty title
        print("erreur")
    # the article body container; skip the text if the page has none
    textTag = getHtmlElements(src, '.mw-parser-output')
    if textTag:
        article['text'] = extractText(textTag[0])
    articles.append(article)
    counter += 1
    # deliberate debug limit: only scrape the first 4 articles
    if counter == 4:
        break
# Dump the scraped articles to disk as JSON.
print(articles)
articlesJSON = json.dumps(articles)
# explicit utf-8 so the output file does not depend on the platform's
# default encoding
with open('output.json', 'w', encoding='utf-8') as f:
    f.write(articlesJSON)