-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
97 lines (85 loc) · 2.99 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import sqlite3
import json
import requests
import csv
import xml.etree.ElementTree as ET
# Parse XML Files found in the database
def ParseXML(XmlIn):
    """Parse an XML document string and return the 'manifest_url' attribute
    found on the first grandchild of each top-level element.

    Iterates every direct child of the root; for each one, reads the
    'manifest_url' attribute of that child's first sub-element. If several
    children carry one, the LAST one wins (matches original behavior).
    Returns '' when the root has no children.

    Raises IndexError if a child has no sub-elements, and KeyError if the
    first sub-element lacks a 'manifest_url' attribute.
    """
    root = ET.fromstring(XmlIn)
    url = ''
    for child in root:
        # Bug fix: Element.getchildren() was removed in Python 3.9;
        # list(child) is the supported equivalent.
        grandchildren = list(child)
        data = grandchildren[0].attrib
        url = data['manifest_url']
    return url
# Retrieve the download links as a list from the found json files
# when a XML file is passed in, the function is called recursive to
# first parse the XML file for the main json file and then
# fetch the embedded pkg files as well
def RetrieveDownloadLinks(ConfigFileIn):
    """Fetch a config URL and return the list of pkg download URLs.

    A '.json' URL is fetched directly and its 'pieces' entries yield the
    links; a '.xml' URL is fetched, parsed for its manifest URL, and this
    function recurses on that. Any other URL returns an empty list.
    NOTE(review): verify=False disables TLS certificate checking —
    presumably intentional for these hosts, but worth confirming.
    """
    links = []
    if '.json' in ConfigFileIn:
        # Download the target json config from the URL.
        response = requests.get(ConfigFileIn, verify=False)
        if response.ok:
            links = [piece['url'] for piece in response.json()['pieces']]
    if '.xml' in ConfigFileIn:
        response = requests.get(ConfigFileIn, verify=False)
        if response.ok:
            # Extract the manifest url, then recurse to collect its pieces.
            manifest = ParseXML(response.text)
            links = RetrieveDownloadLinks(manifest)
    return links
# --- Script body: export pkg download links from the entitlement DB ---
# Bug fix: raw strings — '\e' and '\d' are invalid escape sequences
# (SyntaxWarning on Python 3.12+, a future SyntaxError); the byte values
# of the paths are unchanged.
dbpath = r'.\entitlement.db'
outfile = r'.\dl_links.csv'
outfileLinksOnly = r'.\dl_links_only.txt'
csvheader = ['id', 'title', 'url']
con = sqlite3.connect(dbpath)
try:
    c = con.cursor()
    # Locate the entitlement table — its exact name varies between dumps.
    c.execute("SELECT name FROM sqlite_master WHERE type='table';")
    target_table = ''
    for tabname in c.fetchall():
        itm = tabname[0]
        if "entitlement" in itm:
            target_table = itm
            break
    if target_table != '':
        # Table name comes from sqlite_master (not user input), so the
        # f-string interpolation here cannot inject arbitrary SQL.
        c.execute(f'select json from {target_table}')
        res = c.fetchall()
        # One combined 'with' manages both output files.
        with open(outfile, 'w', encoding='UTF8', newline='') as f, \
             open(outfileLinksOnly, 'w', encoding='UTF8', newline='') as f2:
            writer = csv.writer(f)
            writerLO = csv.writer(f2)
            # Header only for the full CSV; the links-only file is bare URLs.
            writer.writerow(csvheader)
            for dataset in res:
                data = json.loads(dataset[0])
                game_id = data['id']
                try:
                    # Either lookup may raise KeyError when the entry has no
                    # reference package / metadata — handled below as a skip.
                    json_dl_url = data['entitlement_attributes'][0]['reference_package_url']
                    game_title = data['game_meta']['name']
                    print(game_title)
                    for l in RetrieveDownloadLinks(json_dl_url):
                        writer.writerow([game_id, game_title, l])
                        print(f'title: {game_title} URL: {l}')
                        writerLO.writerow([l])
                except KeyError:
                    print('No json file found - skip')
finally:
    # Bug fix: the connection was never closed in the original.
    con.close()