Asynchronous Web Scraping with aiohttp 01-02-2024, 02:52 AM
#1
import aiohttp
import asyncio

async def fetch(url):
    # Open a session and GET the page, returning the response body as text.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.text()

async def main():
    urls = ['https://example1.com', 'https://example2.com', 'https://example3.com']
    # Schedule all fetches concurrently and wait for every result.
    tasks = [fetch(url) for url in urls]
    results = await asyncio.gather(*tasks)
    print("Results:", results)

if __name__ == '__main__':
    asyncio.run(main())
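One note on the snippet above: it opens a new ClientSession for every URL, which works but defeats connection pooling. The aiohttp docs recommend reusing a single session across requests. Below is a minimal sketch of that variant; the URLs are placeholders carried over from the original, and the semaphore limit of 10 is an arbitrary choice, not anything from the post.

import asyncio
import aiohttp

async def fetch(session, url, sem):
    # Reuse the shared session; the semaphore caps how many requests run at once.
    async with sem:
        async with session.get(url) as response:
            response.raise_for_status()  # surface HTTP errors instead of scraping error pages
            return await response.text()

async def main():
    urls = ['https://example1.com', 'https://example2.com', 'https://example3.com']
    sem = asyncio.Semaphore(10)  # at most 10 requests in flight (arbitrary cap)
    # One session for the whole run: TCP connections are pooled and reused.
    async with aiohttp.ClientSession() as session:
        tasks = [fetch(session, url, sem) for url in urls]
        # return_exceptions=True keeps one failed URL from cancelling the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)
    for url, result in zip(urls, results):
        if isinstance(result, Exception):
            print(f"{url} failed: {result}")
        else:
            print(f"{url}: {len(result)} characters")

if __name__ == '__main__':
    asyncio.run(main())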