Build a script that makes URL requests in parallel, reads the data, and puts it in a dict.
import requests
from multiprocessing.dummy import Pool as ThreadPool

urls = ['http://www.python.org', 'http://www.cnn.com',
        'http://www.yahoo.com', 'http://www.nytimes.com']

def request_data(url):
    # Fetch the response body for a single URL.
    return requests.get(url).text

# multiprocessing.dummy gives a thread pool, which suits this I/O-bound work.
pool = ThreadPool(4)
results = pool.map(request_data, urls)

# pool.map preserves input order, so zip pairs each URL with its body.
data = dict(zip(urls, results))
print(data)
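If you prefer the standard library's newer concurrency API, here is a minimal sketch of the same pattern with concurrent.futures, assuming Python 3; the timeout value and the raise_for_status() check are additions for basic error handling, and any failed request will propagate as an exception when results are collected:

import requests
from concurrent.futures import ThreadPoolExecutor

urls = ['http://www.python.org', 'http://www.cnn.com',
        'http://www.yahoo.com', 'http://www.nytimes.com']

def request_data(url):
    # Assumed settings: a 10-second timeout, and raise_for_status() to
    # surface HTTP errors instead of silently storing an error page.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return response.text

# executor.map also preserves input order, so zip pairs URLs with bodies.
with ThreadPoolExecutor(max_workers=4) as executor:
    data = dict(zip(urls, executor.map(request_data, urls)))

print(data.keys())

The with-block shuts the pool down cleanly when the work finishes, which removes the need to manage the pool's lifetime by hand.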