bh-downloader.py
# Yunus YILDIRIM
# -*- coding: utf-8 -*-
import requests
import os
import sys
from time import time as timer
from bs4 import BeautifulSoup
from multiprocessing.dummy import Pool as ThreadPool

# Work around Python 2 "'ascii' codec can't decode byte" errors when
# responses contain non-ASCII characters.
reload(sys)
sys.setdefaultencoding("utf-8")


def linkCrawler(url):
    """Collect the document links from a Black Hat briefings page."""
    print "[+] Crawling started."
    urls = []
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    # Material links on the briefings page carry the 'link-icon' class.
    for a in soup.find_all('a', class_='link-icon'):
        urls.append(a["href"])
    print "[+] Crawling done."
    print "[+] Total URLs crawled: " + str(len(urls))
    return urls


def fileDownloader(link):
    # Drop the first six characters of the file name (e.g. the 'us-16-' prefix).
    local_filename = link.split('/')[-1][6:]
    try:
        print "[+] Download started for: " + str(link)
        r = requests.get(link, stream=True)
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        print "[+] Download finished for: " + str(link)
    except Exception as e:
        print "[!] Failed to download file: " + str(link) + " error: " + str(e)


def main(main_url):
    # e.g. https://www.blackhat.com/us-16/briefings.html -> "US-16"
    year = main_url.split("/")[3].upper()
    directoryName = "BlackHat-%s-Documents" % (year)
    if not os.path.exists(directoryName):
        os.mkdir(directoryName)
    os.chdir(directoryName)
    links = linkCrawler(main_url)
    # Download up to 8 files concurrently.
    pool = ThreadPool(8)
    pool.map(fileDownloader, links)
    pool.close()
    pool.join()


if len(sys.argv) != 2:
    print "[!] Missing parameters"
    print "[+] Usage: python %s briefings_url" % (sys.argv[0])
    print "[+] Ex: python %s https://www.blackhat.com/us-16/briefings.html" % (sys.argv[0])
else:
    print "[+] Starting..."
    start = timer()
    main(sys.argv[1])
    print "Script Execution Time: %s sec" % (timer() - start)
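
# Example run (a sketch; assumes Python 2 with the `requests` and
# `beautifulsoup4` packages installed, and that the briefings page still
# marks its material links with the `link-icon` class):
#
#   $ python bh-downloader.py https://www.blackhat.com/us-16/briefings.html
#   [+] Starting...
#   [+] Crawling started.
#   [+] Crawling done.
#   [+] Total URLs crawled: ...
#
# Downloaded files land in a BlackHat-US-16-Documents/ directory created
# next to the script.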