Skip to content
Advertisement

Scraping multiple website data from a table

I am practicing scraping website and inputting the information into a table. I imported the link, but I seem to be getting an error message based off the url.

from bs4 import BeautifulSoup
import requests
import pandas as pd

eastern_basketball_players = {'mens_basketball':'https://www.espn.com/nba/seasonleaders/_/league/east'}

def scraping_processing(season_leaders, url):
    
    player = []
    teamcity = []
    games_played = []
    minutes_per_game = []
    points = []
    espn_score = []
    
    for link in url:
        
        page = requests.get(link)
        
        soup = BeautifulSoup(page.content, 'html.parser')
        
        raw_names = soup.find_all('td', class_ = 'Player')
        
        teams = soup.find_all('td', class_ = 'Team')
        
        games = soup.find_all('td', class_ = 'Games Played')
        
        minutes = soup.find_all('td', class_ = 'Minutes Per Game')
        
        pts = soup.find_all('td', class_ = 'Points Per Game')
        
        espnscores = soup.find_all('td', class_ = 'EPSN Rating')
        
        for raw_name in raw_names:
            player.append(raw_name.get_text().strip())
            
        for team in teams:
            teamcity.append(team.get_text().strip())
            
        for game in games:
            games_played.append(raw_name.get_text().strip())
            
        for minute in minutes:
            minutes_per_game.append(minute.get_text().strip())
            
        for pt in pts:
            points.append(pt.get_text().strip())
            
        for espnscore in espnscores:
            espn_score.append(espnscore.get_text().strip())
     
    filename = season_leaders + '.csv'
    df = pd.DataFrame()
    df['Names'] = player
    df['Teams'] = teamcity
    df['Games Played'] = games_played
    df['Minutes Per Game'] = minutes_per_game
    df['Points'] = points
    df['ESPN Scores'] = espn_score
    df.to_csv(filename, index = False)
    

east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])

The error message I received states:

MissingSchema                             Traceback (most recent call last)
<ipython-input-49-ca254e49e854> in <module>
----> 1 east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])

<ipython-input-48-2f1a85c4b240> in scraping_processing(season_leaders, url)
     10     for link in url:
     11 
---> 12         page = requests.get(link)
     13 
     14         soup = BeautifulSoup(page.content, 'html.parser')

~anaconda3Pythonlibsite-packagesrequestsapi.py in get(url, params, **kwargs)
     74 
     75     kwargs.setdefault('allow_redirects', True)
---> 76     return request('get', url, params=params, **kwargs)
     77 
     78 

~anaconda3Pythonlibsite-packagesrequestsapi.py in request(method, url, **kwargs)
     59     # cases, and look like a memory leak in others.
     60     with sessions.Session() as session:
---> 61         return session.request(method=method, url=url, **kwargs)
     62 
     63 

~anaconda3Pythonlibsite-packagesrequestssessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
    526             hooks=hooks,
    527         )
--> 528         prep = self.prepare_request(req)
    529 
    530         proxies = proxies or {}

~anaconda3Pythonlibsite-packagesrequestssessions.py in prepare_request(self, request)
    454 
    455         p = PreparedRequest()
--> 456         p.prepare(
    457             method=request.method.upper(),
    458             url=request.url,

~anaconda3Pythonlibsite-packagesrequestsmodels.py in prepare(self, method, url, headers, files, data, params, auth, cookies, hooks, json)
    314 
    315         self.prepare_method(method)
--> 316         self.prepare_url(url, params)
    317         self.prepare_headers(headers)
    318         self.prepare_cookies(cookies)

~anaconda3Pythonlibsite-packagesrequestsmodels.py in prepare_url(self, url, params)
    388             error = error.format(to_native_string(url, 'utf8'))
    389 
--> 390             raise MissingSchema(error)
    391 
    392         if not host:

MissingSchema: Invalid URL 'h': No schema supplied. Perhaps you meant http://h?

I rechecked the url to run the code again, but I am still receiving this error message. Can someone please help me?

Advertisement

Answer

You need to either get rid of the loop, or your urls need to be in a list.

But more importantly, you need to go and review html and how to identify tags and attributes. There are not <td> tags in the html with class="Player", or class="Team", etc.

Look at this code, and look at the html to see how it iterates through it.

from bs4 import BeautifulSoup
import requests
import pandas as pd

eastern_basketball_players = {'mens_basketball':['https://www.espn.com/nba/seasonleaders/_/league/east']}

def scraping_processing(season_leaders, url):
    
    rows = []
    
    for link in url:
        page = requests.get(link)
        
        soup = BeautifulSoup(page.content, 'html.parser')
        
        colNames = soup.find('tr', class_='colhead')
        colNames = [x.text for x in colNames.find_all('td')]
        
        trs = soup.find_all('tr')
        for row in trs:
            if row['class'][0] not in ['colhead', 'stathead']:
                rows.append([x.text for x in row.find_all('td')])
            
    filename = season_leaders + '.csv'
    df = pd.DataFrame(rows, columns=colNames)
    df.to_csv(filename, index = False)
    

east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])
User contributions licensed under: CC BY-SA
8 People found this is helpful
Advertisement