Skip to content

Commit

Permalink
Update mscsploit.py
Browse files Browse the repository at this point in the history
Add (current/total) progress counters to the download and skip messages, and polish the status-message wording
  • Loading branch information
misterhackerman authored Mar 28, 2024
1 parent 0399a78 commit 4fb5756
Showing 1 changed file with 6 additions and 4 deletions.
10 changes: 6 additions & 4 deletions mscsploit.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,11 +173,12 @@ def find_files_paths_and_links(navigation_dict, soup):

def download_from_dict(path_link_dict, folder):
counter = 0
for path, link, name in track(path_link_dict, description=f'Downloading...'):
for path, link, name in track(path_link_dict, description=f'[*] Downloading...'):

counter = counter + 1
count = f' ({counter}/{len(path_link_dict)})'
if os.path.isfile(folder + path + name):
print('[ Already there! ] ' + name)
print('[ Already there! ] ' + name + count)
continue

if not os.path.isdir(folder + path):
Expand All @@ -186,7 +187,7 @@ def download_from_dict(path_link_dict, folder):
response = requests.get(link, headers=HEADERS)
with open(folder + path + name, 'wb') as file:
file.write(response.content)
print('[*] Downloaded ' + name)
print('[*] Downloaded ' + name + count)


def main():
Expand All @@ -196,8 +197,9 @@ def main():
course_number = choose_course(courses)
folder = make_course_folder(courses, course_number, folder)
download_url = 'https://msc-mu.com/courses/' + course_number
print('[*] Requesting Page...')
print('[*] Requesting page...')
course_page = requests.get(download_url, headers=HEADERS)
print('[*] Parsing page into a soup')
soup = BeautifulSoup(course_page.text, 'html.parser')

nav_dict = create_nav_links_dictionary(soup)
Expand Down

0 comments on commit 4fb5756

Please sign in to comment.