Python - BeautifulSoup - Scraped content only being written to first text file, not subsequent files

Asked by Brendan Rodgers on Stack Overflow, December 23, 2021

I am using the code below to scrape data from sports schedule sites and write the information to text files. The data prints correctly to the console, and the data from the first URL (https://sport-tv-guide.live/live/darts) is written to its text file as expected.

The problem is that the content from the second URL (https://sport-tv-guide.live/live/boxing/) is not written to the expected text file (the file is created, but it is empty).

The code I am using is below:

import requests
import time
from bs4 import BeautifulSoup

def makesoup(url):
    cookies = {'mycountries' : '101,28,3,102,42,10,18,4,2,22', 'user_time_zone': 'Europe/London',  'user_time_zone_id': '1'} 
    r = requests.post(url,  cookies=cookies)
    return BeautifulSoup(r.text,"lxml")
   
def linkscrape(links, savefile):
    baseurl = "https://sport-tv-guide.live"
    urllist = []
    
    for link in links:
        finalurl = (baseurl+ link['href'])
        urllist.append(finalurl)
        # print(finalurl)
        
    for singleurl in urllist:
        soup2=makesoup(url=singleurl)
        g_data=soup2.find_all('div', {'id': 'channelInfo'})
        c_data=soup2.find_all('div', {'class': 'liveOtherStations clearfix'})
    
    with open(savefile ,"w") as text_file:
        
        for match in g_data:
            try:
                hometeam =  match.find_previous('div', class_='cell40 text-center teamName1').text.strip()
                awayteam =  match.find_previous('div', class_='cell40 text-center teamName2').text.strip()
                print("Competitors; ", hometeam +" "+ "vs" +" "+ awayteam)
            except:
                hometeam = "Home Team element not found"
                awayteam = "Away Team element not found"
            try:
                startime =  match.find('div', class_='time full').text.strip()
                print("Time; ", startime) 
            except:
                startime = "Time element not found"
            try:
                event=  match.find('div', class_='title full').text.strip()
                print("Event:",  event)
            except:
                event = "Event element not found"
            try:
                dateandtime = match.find('div', class_='date full').text.strip()
                print("Date:",  dateandtime)
            except:
                dateandtime = "Date not found"
            try:
                sport = match.find('div', class_='text full').text.strip()
                print("Sport:",  sport)
            except:
                sport = "Sport element not found"
            try:
                singlechannel = match.find('div', class_='station full').text.strip()
                print("Main Channel:",  singlechannel)
                print("-----")
            except:
                singlechannel = "Single Channel element not found"
            for channel in c_data:
                try:
                    channels = match.find('div', class_='stationLive active col-wrap')
                    print("Extra Channels:",  channel.text)
                except:
                    channels = "No channels found"
                    print(channels)
                print("-------")
                
                text_file.writelines(
                    "__**Sport:**__: " + sport + "\n"
                    + "__**Competitors:**__: " + hometeam + awayteam + event + "\n"
                    + "__**Match Date:**__: " + dateandtime + "\n"
                    + "__**Match Time:**__: " + startime + "\n"
                    + "__**Main Channel**__: " + singlechannel + "\n"
                    + "__**Channels**__: " + channel.text + "\n"
                    + "-" * 20 + "\n"
                )


            
def matches():
    
    dict = {"https://sport-tv-guide.live/live/darts/":"/home/brendan/Desktop/testing,txt",  
"https://sport-tv-guide.live/live/boxing/":"/home/brendan/Desktop/boxing.txt"}

    for key,  value  in dict.items():
        soup=makesoup(url = key)
        linkscrape(links= soup.find_all('a', {'class': 'article flag',  'href' : True}) , savefile = value)
        
matches()

Below is a screenshot of the console output, which displays correctly:

[screenshot: console output]

I am thinking there may be an issue with the position of the with block that opens the text file, causing the file to be created but the actual .writelines call not to run correctly after the first text file is successfully written. I have tried indenting all the code starting from the with block, but this had no effect on the output.

Unfortunately I am unsure how to proceed from here.

Thank you to anyone who can provide assistance or solve this problem.

One Answer

Found the problem. For the boxing URL (https://sport-tv-guide.live/live/boxing/) there are no extra channels, so control never enters the for channel in c_data loop and nothing is written to the file.
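To make that concrete, here is a minimal standalone sketch of the failure mode (not the scraper itself): opening the file creates it, but a for loop over an empty list never executes its body, so the write inside it never runs.

c_data = []  # the boxing page yields no "liveOtherStations" blocks
with open("boxing.txt", "w") as text_file:
    for channel in c_data:
        text_file.writelines("never reached\n")  # loop body is skipped entirely
# Result: boxing.txt exists but is empty.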

You can collect all the extra channels in a list and then write to the file:

import requests
import time
from bs4 import BeautifulSoup

def makesoup(url):
    cookies = {'mycountries' : '101,28,3,102,42,10,18,4,2,22', 'user_time_zone': 'Europe/London',  'user_time_zone_id': '1'} 
    r = requests.post(url,  cookies=cookies)
    return BeautifulSoup(r.text,"lxml")
   
def linkscrape(links, savefile):
    baseurl = "https://sport-tv-guide.live"
    urllist = []
    print(savefile)
    for link in links:
        finalurl = (baseurl+ link['href'])
        urllist.append(finalurl)
        # print(finalurl)
        
    for singleurl in urllist:
        soup2=makesoup(url=singleurl)
        g_data=soup2.find_all('div', {'id': 'channelInfo'})
        c_data=soup2.find_all('div', {'class': 'liveOtherStations clearfix'})
    
    with open(savefile ,"w") as text_file:
        
        for match in g_data:
            try:
                hometeam =  match.find_previous('div', class_='cell40 text-center teamName1').text.strip()
                awayteam =  match.find_previous('div', class_='cell40 text-center teamName2').text.strip()
                print("Competitors; ", hometeam +" "+ "vs" +" "+ awayteam)
            except:
                hometeam = "Home Team element not found"
                awayteam = "Away Team element not found"
            try:
                startime =  match.find('div', class_='time full').text.strip()
                print("Time; ", startime) 
            except:
                startime = "Time element not found"
            try:
                event=  match.find('div', class_='title full').text.strip()
                print("Event:",  event)
            except:
                event = "Event element not found"
            try:
                dateandtime = match.find('div', class_='date full').text.strip()
                print("Date:",  dateandtime)
            except:
                dateandtime = "Date not found"
            try:
                sport = match.find('div', class_='text full').text.strip()
                print("Sport:",  sport)
            except:
                sport = "Sport element not found"
            try:
                singlechannel = match.find('div', class_='station full').text.strip()
                print("Main Channel:",  singlechannel)
                print("-----")
            except:
                singlechannel = "Single Channel element not found"
            
            extra_channels = []

            for channel in c_data:
                try:
                    channels = match.find('div', class_='stationLive active col-wrap')
                    print("Extra Channels:",  channel.text)
                    extra_channels.append(channel.text)
                except:
                    channels = "No channels found"
                    print(channels)
                    extra_channels.append(channels)  # record the fallback message, not channel.text
                print("-------")
            
            if extra_channels:
                for channel in extra_channels:    
                    text_file.writelines(
                        "__**Sport:**__: " + sport + "\n"
                        + "__**Competitors:**__: " + hometeam + awayteam + event + "\n"
                        + "__**Match Date:**__: " + dateandtime + "\n"
                        + "__**Match Time:**__: " + startime + "\n"
                        + "__**Main Channel**__: " + singlechannel + "\n"
                        + "__**Channels**__: " + channel + "\n"
                        + "-" * 20 + "\n"
                    )
            else:
                text_file.writelines(
                    "__**Sport:**__: " + sport + "\n"
                    + "__**Competitors:**__: " + hometeam + awayteam + event + "\n"
                    + "__**Match Date:**__: " + dateandtime + "\n"
                    + "__**Match Time:**__: " + startime + "\n"
                    + "__**Main Channel**__: " + singlechannel + "\n"
                    + "__**Channels**__: " + "\n"
                    + "-" * 20 + "\n"
                )
            


            
def matches():
    
    dict = {"https://sport-tv-guide.live/live/darts/":"testing.txt",  
"https://sport-tv-guide.live/live/boxing/":"boxing.txt"}

    for key,  value  in dict.items():
        soup=makesoup(url = key)
        linkscrape(links= soup.find_all('a', {'class': 'article flag',  'href' : True}) , savefile = value)
        
matches()
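
As a side note, the duplicated writelines calls in the if/else above could be collapsed into a single write path by falling back to a placeholder list when no extra channels were found. This is a hypothetical simplification rather than part of the original answer, and it assumes the same variables (extra_channels, sport, hometeam, and so on) inside linkscrape:

# Hypothetical simplification: `or` swaps in a one-element placeholder
# list when extra_channels is empty, so one loop covers both the
# "channels found" and "no channels" cases.
for channel in extra_channels or ["No extra channels found"]:
    text_file.writelines(
        "__**Sport:**__: " + sport + "\n"
        + "__**Competitors:**__: " + hometeam + awayteam + event + "\n"
        + "__**Match Date:**__: " + dateandtime + "\n"
        + "__**Match Time:**__: " + startime + "\n"
        + "__**Main Channel**__: " + singlechannel + "\n"
        + "__**Channels**__: " + channel + "\n"
        + "-" * 20 + "\n"
    )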

Answered by bigbounty on December 23, 2021
