import os
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor, as_completed

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from tqdm import tqdm
# Download a single file, streaming it to disk with a progress bar.
# The function header and retry session are reconstructed from the imports
# and the batch caller below; the retry settings are representative values.
def download_file(url):
    # Derive the local filename from the URL path
    local_path = os.path.basename(urlparse(url).path)
    # Retry transient server errors with exponential backoff
    session = requests.Session()
    retries = Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    session.mount('http://', HTTPAdapter(max_retries=retries))
    session.mount('https://', HTTPAdapter(max_retries=retries))
    try:
        response = session.get(url, stream=True, timeout=30)
        response.raise_for_status()
        total_size = int(response.headers.get('content-length', 0))
        # Write to file, updating the progress bar chunk by chunk
        with open(local_path, 'wb') as f:
            with tqdm(total=total_size, unit='B', unit_scale=True,
                      desc=os.path.basename(local_path), ncols=80) as pbar:
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                        pbar.update(len(chunk))
        print(f'Saved as {local_path}')
        return local_path
    except Exception as e:
        print(f'Failed to download {url}. Error: {str(e)}')
        return None
# Batch download function (parallel, with per-file error handling and retries)
def download_files(file_urls):
    with ThreadPoolExecutor(max_workers=5) as executor:  # cap concurrency at 5 workers
        futures = [executor.submit(download_file, url) for url in file_urls]
        for future in as_completed(futures):
            result = future.result()
            if result:
                print(f'Downloaded: {result}')
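
# A minimal usage sketch: the URL list and the __main__ guard below are
# illustrative assumptions, not part of the original script.
if __name__ == '__main__':
    file_urls = [
        'https://example.com/files/report.pdf',  # hypothetical URL
        'https://example.com/files/data.csv',    # hypothetical URL
    ]
    download_files(file_urls)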