Update README.md
README.md

About the Plena Inclusión La Rioja data, we are using the news. They are listed at https://www.plenainclusionlarioja.org/actualidad/noticias/. We get the link to each news item from the "btn btn-secondary" anchors, then we look for the "lecturafacil_texto" class, which has the easy-to-read text, and the "articleBody" section, which has the original text, and we save them in two txt files as we can see in the following code.

```
from bs4 import BeautifulSoup
from urllib.request import urlopen

# Collect the link to every news item from the listing page
urls = []
url = "https://www.plenainclusionlarioja.org/actualidad/noticias/"
page = urlopen(url).read()
html = page.decode("utf-8")
soup = BeautifulSoup(html, 'html.parser')
mydivs = soup.find_all("a", {'class': "btn btn-secondary"})
for a in mydivs:
    # Keep the slug that follows /actualidad/noticias/ in the href
    nombre = a['href'].split('/actualidad/noticias/')[1]
    urls.append(url + nombre)

# Visit each news item and save the easy-to-read and the original text
for enlace in urls:
    page = urlopen(enlace).read()
    html = page.decode("utf-8")
    soup = BeautifulSoup(html, 'html.parser')
    mydivs = soup.find_all("div", {"class": "lecturafacil_texto"})
    if mydivs:  # only the news that actually have an easy-to-read version
        mydivsComplejo = soup.find_all("div", {"itemprop": "articleBody"})
        with open('./lecturaFacil/lf-' + enlace.split('/')[-1] + '.txt', 'w') as f:
            f.write(str(mydivs))
        with open('./lecturaCompleja/lc-' + enlace.split('/')[-1] + '.txt', 'w') as f:
            f.write(str(mydivsComplejo))
```
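Note that `f.write(str(mydivs))` keeps the HTML markup of the extracted nodes in the saved txt files. If plain text were preferred, BeautifulSoup's `get_text()` could strip the tags first; a minimal sketch of such a helper (the helper name and the newline separator are assumptions, not part of the original script):

```
# Hypothetical helper: join the plain text of the extracted nodes,
# dropping the HTML markup that str(...) would keep.
def nodos_a_texto(nodos):
    return "\n".join(n.get_text(" ", strip=True) for n in nodos)

# e.g. replace f.write(str(mydivs)) with f.write(nodos_a_texto(mydivs))
```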
About the Plena Inclusión España data, we are using the news as well. In this case they are listed at https://www.plenainclusion.org/noticias. We get the links to all the news from there with BeautifulSoup, but here we have to page through the 194 listing pages and take the links from the "elementor-post__read-more" class. Then we look for the "enlace-lectura-dificil" paragraph, which marks the news that also offer the original (hard-to-read) version; on those pages the "articleBody" section has the easy-to-read text and the "post-lectura-dificil" section has the original text, and we save them in two txt files as we can see in the following code.

```
# Collect the links to all the news across the 194 listing pages
enlaces = []
for i in range(194):
    url = "https://www.plenainclusion.org/noticias/?sf_paged=" + str(i)
    page = urlopen(url).read()
    html = page.decode("utf-8")
    soup = BeautifulSoup(html, 'html.parser')
    mydivs = soup.find_all("a", {'class': "elementor-post__read-more"})
    for a in mydivs:
        enlaces.append(a['href'])

# Visit each news item and save the easy-to-read and the original text
for en in enlaces:
    page = urlopen(en).read()
    html = page.decode("utf-8")
    soup = BeautifulSoup(html, 'html.parser')
    mydivs = soup.find_all("p", {'class': "enlace-lectura-dificil"})
    if mydivs:  # only the news that also offer the original version
        nombre = en.split('/')[-2]
        lf = soup.find_all("section", {"itemprop": "articleBody"})
        # Drop everything from the first embedded <figure> onwards
        lf = str(lf).split("<figure")[0]
        lc = soup.find_all("section", {"class": "post-lectura-dificil"})
        with open('./plenaInclusionEspaña/lecturaFacil/lf-' + nombre + '.txt', 'w') as f:
            f.write(lf)
        with open('./plenaInclusionEspaña/lecturaCompleja/lc-' + nombre + '.txt', 'w') as f:
            f.write(str(lc))
```
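The result is two parallel folders where matching files share the same slug (lf-<slug>.txt and lc-<slug>.txt). A minimal sketch of how the pairs could be read back, assuming the folder layout of the second script; the function name and the pairing logic are illustrative, not part of the original pipeline:

```
import os

# Hypothetical loader for the parallel corpus written above; the folder
# names follow the open() calls in the script, everything else is an
# assumption for illustration.
def cargar_pares(base='./plenaInclusionEspaña'):
    pares = []
    for fichero in os.listdir(os.path.join(base, 'lecturaFacil')):
        slug = fichero[len('lf-'):]  # strip the 'lf-' prefix
        ruta_lf = os.path.join(base, 'lecturaFacil', fichero)
        ruta_lc = os.path.join(base, 'lecturaCompleja', 'lc-' + slug)
        if os.path.exists(ruta_lc):  # keep only complete easy/original pairs
            with open(ruta_lf) as f_lf, open(ruta_lc) as f_lc:
                pares.append((f_lf.read(), f_lc.read()))
    return pares
```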