Batch-Downloading Images from a Website
Not organized into folders
import requests
from bs4 import BeautifulSoup
import os
from concurrent.futures import ThreadPoolExecutor

def download_image(img_url):
    # Only keep images whose extension is .jpg or .jpeg
    if img_url.lower().endswith(('.jpg', '.jpeg')):
        try:
            img_response = requests.get(img_url, stream=True, timeout=10)
            img_size = int(img_response.headers.get('content-length', 0))
            if img_size > 50 * 1024:  # only save images larger than 50 KB
                filename = os.path.join(images_dir, img_url.split('/')[-1])
                with open(filename, 'wb') as f:
                    for chunk in img_response.iter_content(1024):
                        f.write(chunk)
                print(f"Downloaded {img_url}")
        except Exception as e:
            print(f"Error downloading {img_url}: {e}")
    else:
        print(f"Skipped {img_url} due to file extension")

def download_images_from_page(url):
    page_response = requests.get(url, timeout=10)
    page_soup = BeautifulSoup(page_response.content, 'html.parser')
    images = page_soup.find_all('img')
    with ThreadPoolExecutor(max_workers=5) as executor:  # adjust max_workers to change the number of threads
        for img in images:
            img_url = img.get('src')
            if img_url:  # skip <img> tags that have no src attribute
                executor.submit(download_image, img_url)

def main(base_url, start_path):
    global images_dir  # download_image reads this module-level path
    images_dir = 'images'
    if not os.path.exists(images_dir):
        os.makedirs(images_dir)
    start_url = f"{base_url}/{start_path}"
    response = requests.get(start_url, timeout=10)
    soup = BeautifulSoup(response.content, 'html.parser')
    links = soup.find_all('h3')  # thread titles on the listing page sit inside <h3> tags
    for link in links:
        a_tag = link.find('a', href=True)
        if a_tag:
            full_url = f"{base_url}/{a_tag['href']}"
            download_images_from_page(full_url)

# Base URL and start path used in the example
base_url = 'http://xxxxxxx'
start_path = 'thread6.php?fid=15'

if __name__ == "__main__":
    main(base_url, start_path)
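Both scripts assume that every src attribute already holds an absolute URL; download_image filters on the extension and requests the value as-is. Many forum pages emit relative paths instead, so if downloads are silently skipped or fail, the src values likely need to be resolved against the page URL first. A minimal sketch, assuming relative paths may occur (resolve_img_urls is a hypothetical helper, not part of the scripts in this post):

from urllib.parse import urljoin

def resolve_img_urls(page_url, soup):
    """Yield absolute image URLs from a parsed page, resolving relative src paths."""
    for img in soup.find_all('img'):
        src = img.get('src')
        if src:
            # urljoin leaves absolute URLs untouched and resolves relative
            # ones against the page they were found on.
            yield urljoin(page_url, src)

Inside download_images_from_page you would then iterate over resolve_img_urls(url, page_soup) instead of reading each src attribute directly.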
Organized into folders
import requests
from bs4 import BeautifulSoup
import os
from concurrent.futures import ProcessPoolExecutor
import re

def sanitize_folder_name(name):
    """Clean a folder name by replacing characters that are illegal in file systems."""
    # Windows forbids \ / * ? : " < > | in file and folder names
    return re.sub(r'[\\/*?:"<>|]', '_', name)

def download_image(data):
    img_url, filename_prefix = data
    # Only keep images whose extension is .jpg or .jpeg
    if img_url.lower().endswith(('.jpg', '.jpeg')):
        try:
            img_response = requests.get(img_url, stream=True, timeout=10)
            img_size = int(img_response.headers.get('content-length', 0))
            if img_size > 20 * 1024:  # only save images larger than 20 KB
                filename = f"{filename_prefix}.jpg"
                with open(filename, 'wb') as f:
                    for chunk in img_response.iter_content(1024):
                        f.write(chunk)
                print(f"Downloaded {filename}")
        except Exception as e:
            print(f"Error downloading {img_url}: {e}")
    else:
        print(f"Skipped {img_url} due to file extension")

def download_images_from_page(url, base_dir):
    page_response = requests.get(url, timeout=10)
    page_soup = BeautifulSoup(page_response.content, 'html.parser')
    images = page_soup.find_all('img')
    img_data = []
    for i, img in enumerate(images):
        img_url = img.get('src')
        if img_url:  # skip <img> tags that have no src attribute
            filename_prefix = os.path.join(base_dir, f"{i:04d}")
            img_data.append((img_url, filename_prefix))
    # Each worker receives the full target path via filename_prefix, so no
    # state needs to be shared across processes.
    with ProcessPoolExecutor(max_workers=4) as executor:  # adjust max_workers to change the number of processes
        executor.map(download_image, img_data)

def main(base_url, start_path):
    global images_dir
    images_dir = 'images'
    if not os.path.exists(images_dir):
        os.makedirs(images_dir)
    start_url = f"{base_url}/{start_path}"
    response = requests.get(start_url, timeout=10)
    soup = BeautifulSoup(response.content, 'html.parser')
    links = soup.find_all('h3')  # thread titles on the listing page sit inside <h3> tags
    for link in links:
        a_tag = link.find('a', href=True)
        if a_tag:
            # One folder per thread, named after the sanitized thread title
            folder_name = sanitize_folder_name(a_tag.text.strip())
            full_url = f"{base_url}/{a_tag['href']}"
            page_dir = os.path.join(images_dir, folder_name)
            if not os.path.exists(page_dir):
                os.makedirs(page_dir)
            download_images_from_page(full_url, page_dir)

# Base URL and start path used in the example
base_url = 'http://xxx/pw'
start_path = 'thread1022.php?fid=15&page=3'

if __name__ == "__main__":
    main(base_url, start_path)
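The example start_path ends in page=3, which suggests the thread listing is paginated through a page query parameter. Assuming that parameter behaves the same on every page (an assumption based only on the example URL, not verified against the site), a small wrapper can walk a range of listing pages by calling the main function above once per page:

def crawl_listing_pages(base_url, fid, first_page, last_page):
    # Hypothetical path format, copied from the example start_path above;
    # adjust it if the target forum paginates differently.
    for page in range(first_page, last_page + 1):
        main(base_url, f"thread1022.php?fid={fid}&page={page}")

Replacing the __main__ block with a call like crawl_listing_pages('http://xxx/pw', 15, 1, 3) would then collect images from listing pages 1 through 3.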
