import requests
import os
import re
def download_genome_first(gcf_id, out_dir=None):
    """Download the `_genomic.fna.gz` assembly file for one NCBI accession.

    Resolves the NCBI FTP directory for a GCF/GCA accession, scrapes the
    directory listing for the full assembly folder name, and streams the
    genomic FASTA archive into *out_dir*.

    Parameters:
        gcf_id:  accession such as "GCF_000001405.39" or "GCA_...".
        out_dir: destination directory; defaults to the module-level
                 ``output_dir`` to stay backward compatible.

    Returns None; progress and errors are reported via print().
    """
    if out_dir is None:
        out_dir = output_dir  # module-level default, as before

    if gcf_id.startswith("GCF"):
        base_url = "https://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/"
    elif gcf_id.startswith("GCA"):
        base_url = "https://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/"
    else:
        print(f"Invalid ID: {gcf_id}. Skipping...")
        return

    # NCBI lays assemblies out by the 9 accession digits split into
    # triplets: GCF_000001405.39 -> GCF/000/001/405.  Take only the digits
    # before any ".version" suffix; the old pop()-based chunking dropped a
    # real path component whenever the ID had no version suffix.
    digits = gcf_id.split('_')[1].split('.')[0]
    path_parts = [digits[i:i + 3] for i in range(0, len(digits), 3)]
    ftp_path = base_url + "/".join(path_parts)

    headers = {'User-Agent': 'Mozilla/5.0'}
    try:
        # timeout prevents an unresponsive server from hanging the batch;
        # raise_for_status surfaces 404s instead of parsing an error page.
        response = requests.get(ftp_path, headers=headers, timeout=30)
        response.raise_for_status()
        response.encoding = response.apparent_encoding
    except Exception as e:
        print(f"第一次爬取失败: {e}")
        return

    # Escape the accession: its "." would otherwise match any character.
    pattern = rf'<a href="({re.escape(gcf_id)}[^/]+)/">'
    matches = re.findall(pattern, response.text)
    if not matches:
        print(f"未找到匹配的目录: {gcf_id}")
        return

    url_2 = ftp_path + '/' + matches[0] + '/' + matches[0] + '_genomic.fna.gz'
    print(url_2)
    try:
        # stream=True: genomes can be hundreds of MB — write in chunks
        # instead of buffering the whole body in memory.
        response_2 = requests.get(url_2, headers=headers, timeout=120, stream=True)
    except Exception as e:
        print(f"第二次爬取失败: {e}")
        return

    if response_2.status_code == 200:
        file_name = url_2.split("/")[-1]
        os.makedirs(out_dir, exist_ok=True)
        file_path = os.path.join(out_dir, file_name)
        with open(file_path, 'wb') as file:
            for chunk in response_2.iter_content(chunk_size=1 << 20):
                file.write(chunk)
        print(f"Downloaded {file_name} to {out_dir}")
    else:
        print(f"Failed to download file from {url_2}. Status code: {response_2.status_code}")
def batch_download(gcf_file):
    """Read one accession ID per line from *gcf_file* and download each.

    Delegates the actual fetch to download_genome_first(); lines are
    stripped of surrounding whitespace before use.
    """
    with open(gcf_file, 'r') as handle:
        accessions = [entry.strip() for entry in handle]
    for accession in accessions:
        print(f"Processing: {accession}")
        download_genome_first(accession)
# Script configuration. Kept at module level because
# download_genome_first() reads the global output_dir.
gcf_file = "./species-gcaids.txt"
output_dir = "./downloads"

if __name__ == "__main__":
    # Guard so importing this module does not kick off the batch download.
    batch_download(gcf_file)
    print("所有文件已经下载完毕!")