I am practicing scraping websites and loading the information into a table. I imported the link, but I seem to be getting an error message based on the URL.
```python
from bs4 import BeautifulSoup
import requests
import pandas as pd

eastern_basketball_players = {'mens_basketball': 'https://www.espn.com/nba/seasonleaders/_/league/east'}

def scraping_processing(season_leaders, url):

    player = []
    teamcity = []
    games_played = []
    minutes_per_game = []
    points = []
    espn_score = []

    for link in url:

        page = requests.get(link)

        soup = BeautifulSoup(page.content, 'html.parser')

        raw_names = soup.find_all('td', class_ = 'Player')
        teams = soup.find_all('td', class_ = 'Team')
        games = soup.find_all('td', class_ = 'Games Played')
        minutes = soup.find_all('td', class_ = 'Minutes Per Game')
        pts = soup.find_all('td', class_ = 'Points Per Game')
        espnscores = soup.find_all('td', class_ = 'EPSN Rating')

        for raw_name in raw_names:
            player.append(raw_name.get_text().strip())

        for team in teams:
            teamcity.append(team.get_text().strip())

        for game in games:
            games_played.append(raw_name.get_text().strip())

        for minute in minutes:
            minutes_per_game.append(minute.get_text().strip())

        for pt in pts:
            points.append(pt.get_text().strip())

        for espnscore in espnscores:
            espn_score.append(espnscore.get_text().strip())

    filename = season_leaders + '.csv'
    df = pd.DataFrame()
    df['Names'] = player
    df['Teams'] = teamcity
    df['Games Played'] = games_played
    df['Minutes Per Game'] = minutes_per_game
    df['Points'] = points
    df['ESPN Scores'] = espn_score
    df.to_csv(filename, index = False)

east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])
```
The error message I received states:
```
MissingSchema                             Traceback (most recent call last)
<ipython-input-49-ca254e49e854> in <module>
----> 1 east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])

<ipython-input-48-2f1a85c4b240> in scraping_processing(season_leaders, url)
     10     for link in url:
     11
---> 12         page = requests.get(link)
     13
     14         soup = BeautifulSoup(page.content, 'html.parser')

~\anaconda3\Python\lib\site-packages\requests\api.py in get(url, params, **kwargs)
     74
     75     kwargs.setdefault('allow_redirects', True)
---> 76     return request('get', url, params=params, **kwargs)
     77
     78

~\anaconda3\Python\lib\site-packages\requests\api.py in request(method, url, **kwargs)
     59     # cases, and look like a memory leak in others.
     60     with sessions.Session() as session:
---> 61         return session.request(method=method, url=url, **kwargs)
     62
     63

~\anaconda3\Python\lib\site-packages\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
    526             hooks=hooks,
    527         )
--> 528         prep = self.prepare_request(req)
    529
    530         proxies = proxies or {}

~\anaconda3\Python\lib\site-packages\requests\sessions.py in prepare_request(self, request)
    454
    455         p = PreparedRequest()
--> 456         p.prepare(
    457             method=request.method.upper(),
    458             url=request.url,

~\anaconda3\Python\lib\site-packages\requests\models.py in prepare(self, method, url, headers, files, data, params, auth, cookies, hooks, json)
    314
    315         self.prepare_method(method)
--> 316         self.prepare_url(url, params)
    317         self.prepare_headers(headers)
    318         self.prepare_cookies(cookies)

~\anaconda3\Python\lib\site-packages\requests\models.py in prepare_url(self, url, params)
    388             error = error.format(to_native_string(url, 'utf8'))
    389
--> 390             raise MissingSchema(error)
    391
    392         if not host:

MissingSchema: Invalid URL 'h': No schema supplied. Perhaps you meant http://h?
```
I rechecked the URL and ran the code again, but I am still receiving this error message. Can someone please help me?
Answer
You need to either get rid of the loop or put your URLs in a list. But more importantly, you need to go back and review HTML and how to identify tags and attributes: there are no `<td>` tags in this page's HTML with `class="Player"`, `class="Team"`, etc.
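To see why the loop is the immediate problem: your dictionary value is a single string, and iterating over a string yields one character at a time, so `requests.get()` is first called with just `'h'` — exactly the URL the MissingSchema error complains about. A minimal demonstration:

```python
url = 'https://www.espn.com/nba/seasonleaders/_/league/east'

# Iterating over a string yields single characters, which is why
# requests.get(link) received 'h' on the first pass of the loop.
for link in url:
    print(link)   # prints 'h', then 't', then 't', ...
    break

# Wrapping the URL in a list makes the loop see whole URLs instead.
for link in [url]:
    print(link)   # prints the full URL
```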
Look at this code alongside the page's HTML to see how it iterates through the table rows.
```python
from bs4 import BeautifulSoup
import requests
import pandas as pd

eastern_basketball_players = {'mens_basketball': ['https://www.espn.com/nba/seasonleaders/_/league/east']}

def scraping_processing(season_leaders, url):

    rows = []

    for link in url:
        page = requests.get(link)

        soup = BeautifulSoup(page.content, 'html.parser')

        # The header row has class="colhead"; its <td> texts become the column names
        colNames = soup.find('tr', class_='colhead')
        colNames = [x.text for x in colNames.find_all('td')]

        # Keep every row except repeated headers and the title row
        trs = soup.find_all('tr')
        for row in trs:
            if row['class'][0] not in ['colhead', 'stathead']:
                rows.append([x.text for x in row.find_all('td')])

    filename = season_leaders + '.csv'
    df = pd.DataFrame(rows, columns=colNames)
    df.to_csv(filename, index = False)

east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])
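One caveat, in case ESPN's markup changes: `row['class']` raises a `KeyError` for any `<tr>` that has no class attribute. Every row in this table currently carries one, but a more defensive variant of the inner loop (a sketch, not part of the original answer) can use BeautifulSoup's `Tag.get()`:

```python
for row in trs:
    # .get() returns None instead of raising KeyError when 'class' is absent
    row_class = row.get('class') or ['']
    if row_class[0] not in ('colhead', 'stathead'):
        rows.append([x.text for x in row.find_all('td')])
```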