import requests
from bs4 import BeautifulSoup

# News articles covering the "Disinformation Dozen" COVID-19 misinformation
# report; each page is fetched and its paragraph text extracted below.
urls = [
    'https://www.nytimes.com/2021/07/24/technology/disinformation-dozen-facebook-vaccine.html',
    'https://www.nbcnews.com/tech/social-media/disinformation-dozen-12-people-responsible-majority-covid-19-misinformation-social-n1265892',
    'https://www.bbc.com/news/world-us-canada-57984645',
    'https://www.theguardian.com/world/2021/jul/17/covid-vaccine-misinformation-disinformation-dozen',
    'https://www.reuters.com/business/healthcare-pharmaceuticals/disinformation-dozen-anti-vaxxers-play-key-role-spreading-falsehoods-2021-07-17/',
    'https://www.cnn.com/2021/07/17/tech/disinformation-dozen-facebook-instagram-twitter/index.html',
    'https://www.businessinsider.com/disinformation-dozen-anti-vaxxers-facebook-instagram-twitter-2021-7',
    'https://www.forbes.com/sites/alisondurkee/2021/07/17/disinformation-dozen-anti-vaccine-activists-responsible-for-two-thirds-of-covid-vaccine-misinformation-on-facebook/?sh=5d7d7d7c5c5d',
    'https://www.washingtonpost.com/technology/2021/07/17/disinformation-dozen-facebook-vaccine/',
    # Fixed: the closing quote was missing here, making the whole file a SyntaxError.
    'https://www.npr.org/2021/07/17/1017497266/the-disinformation-dozen-who-pushed-lies-about-covid-19-vaccines',
]

def scrape_urls(urls, timeout=10):
    """Fetch each URL and return the concatenated ``<p>`` text of each page.

    Parameters
    ----------
    urls : iterable of str
        Page URLs to download.
    timeout : float, optional
        Per-request timeout in seconds (default 10). Without a timeout,
        ``requests.get`` can block forever on an unresponsive server.

    Returns
    -------
    list of str
        One string per URL, the joined text of all paragraph tags on that
        page (empty string if the page has no ``<p>`` tags).

    Raises
    ------
    requests.RequestException
        On connection errors or timeout; errors are not swallowed so the
        caller can see which fetch failed.
    """
    data = []
    for url in urls:
        # Bounded timeout so a single dead host cannot hang the whole run.
        response = requests.get(url, timeout=timeout)
        soup = BeautifulSoup(response.content, 'html.parser')
        # str.join avoids the quadratic cost of repeated `text += ...`.
        text = ''.join(p.get_text() for p in soup.find_all('p'))
        data.append(text)
    return data

# Scrape every article listed above into a list of page texts.
data = scrape_urls(urls)

# Persist the scraped text, one article per line. Explicit UTF-8 so the
# output does not depend on the platform's locale encoding (the default
# on Windows can raise UnicodeEncodeError for non-ASCII article text).
with open('data.txt', 'w', encoding='utf-8') as f:
    for d in data:
        f.write(d + '\n')
