[WIP] Proof of concept of integration with Hugging Face Hub #3206

Open · wants to merge 2 commits into base: develop
97 changes: 52 additions & 45 deletions gensim/downloader.py
@@ -343,7 +343,7 @@ def _get_parts(name):
     return information['models'][name]["parts"]


-def _download(name):
+def _download(name, from_hf=False):
     """Download and extract the dataset/model.

     Parameters
@@ -355,58 +355,65 @@ def _download(name):
     ------
     Exception
         If md5sum on client and in repo are different.

     """
-    url_load_file = "{base}/{fname}/__init__.py".format(base=DOWNLOAD_BASE_URL, fname=name)
     data_folder_dir = os.path.join(BASE_DIR, name)
-    data_folder_dir_tmp = data_folder_dir + '_tmp'
-    tmp_dir = tempfile.mkdtemp()
-    init_path = os.path.join(tmp_dir, "__init__.py")
-    urllib.urlretrieve(url_load_file, init_path)
-    total_parts = _get_parts(name)
-    if total_parts > 1:
-        concatenated_folder_name = "{fname}.gz".format(fname=name)
-        concatenated_folder_dir = os.path.join(tmp_dir, concatenated_folder_name)
-        for part in range(0, total_parts):
-            url_data = "{base}/{fname}/{fname}.gz_0{part}".format(base=DOWNLOAD_BASE_URL, fname=name, part=part)
-
-            fname = "{f}.gz_0{p}".format(f=name, p=part)
-            dst_path = os.path.join(tmp_dir, fname)
-            urllib.urlretrieve(
-                url_data, dst_path,
-                reporthook=partial(_progress, part=part, total_parts=total_parts)
-            )
-            if _calculate_md5_checksum(dst_path) == _get_checksum(name, part):
-                sys.stdout.write("\n")
-                sys.stdout.flush()
-                logger.info("Part %s/%s downloaded", part + 1, total_parts)
-            else:
-                shutil.rmtree(tmp_dir)
-                raise Exception("Checksum comparison failed, try again")
-        with open(concatenated_folder_dir, 'wb') as wfp:
-            for part in range(0, total_parts):
-                part_path = os.path.join(tmp_dir, "{fname}.gz_0{part}".format(fname=name, part=part))
-                with open(part_path, "rb") as rfp:
-                    shutil.copyfileobj(rfp, wfp)
-                os.remove(part_path)
-    else:
-        url_data = "{base}/{fname}/{fname}.gz".format(base=DOWNLOAD_BASE_URL, fname=name)
-        fname = "{fname}.gz".format(fname=name)
-        dst_path = os.path.join(tmp_dir, fname)
-        urllib.urlretrieve(url_data, dst_path, reporthook=_progress)
-        if _calculate_md5_checksum(dst_path) == _get_checksum(name):
-            sys.stdout.write("\n")
-            sys.stdout.flush()
-            logger.info("%s downloaded", name)
-        else:
-            shutil.rmtree(tmp_dir)
-            raise Exception("Checksum comparison failed, try again")
-
-    if os.path.exists(data_folder_dir_tmp):
-        os.remove(data_folder_dir_tmp)
-
-    shutil.move(tmp_dir, data_folder_dir_tmp)
-    os.rename(data_folder_dir_tmp, data_folder_dir)
+    if from_hf:
+        from huggingface_hub import snapshot_download
+        name = "Gensim/" + name
+        download_dir = snapshot_download(name, cache_dir=BASE_DIR)
+        logger.info("%s downloaded", name)
+
+        os.rename(download_dir, data_folder_dir)
+    else:
+        url_load_file = "{base}/{fname}/__init__.py".format(base=DOWNLOAD_BASE_URL, fname=name)
+        data_folder_dir_tmp = data_folder_dir + '_tmp'
+        tmp_dir = tempfile.mkdtemp()
+        init_path = os.path.join(tmp_dir, "__init__.py")
+        urllib.urlretrieve(url_load_file, init_path)
+        total_parts = _get_parts(name)
+        if total_parts > 1:
+            concatenated_folder_name = "{fname}.gz".format(fname=name)
+            concatenated_folder_dir = os.path.join(tmp_dir, concatenated_folder_name)
+            for part in range(0, total_parts):
+                url_data = "{base}/{fname}/{fname}.gz_0{part}".format(base=DOWNLOAD_BASE_URL, fname=name, part=part)
+
+                fname = "{f}.gz_0{p}".format(f=name, p=part)
+                dst_path = os.path.join(tmp_dir, fname)
+                urllib.urlretrieve(
+                    url_data, dst_path,
+                    reporthook=partial(_progress, part=part, total_parts=total_parts)
+                )
+                if _calculate_md5_checksum(dst_path) == _get_checksum(name, part):
+                    sys.stdout.write("\n")
+                    sys.stdout.flush()
+                    logger.info("Part %s/%s downloaded", part + 1, total_parts)
+                else:
+                    shutil.rmtree(tmp_dir)
+                    raise Exception("Checksum comparison failed, try again")
+            with open(concatenated_folder_dir, 'wb') as wfp:
+                for part in range(0, total_parts):
+                    part_path = os.path.join(tmp_dir, "{fname}.gz_0{part}".format(fname=name, part=part))
+                    with open(part_path, "rb") as rfp:
+                        shutil.copyfileobj(rfp, wfp)
+                    os.remove(part_path)
+        else:
+            url_data = "{base}/{fname}/{fname}.gz".format(base=DOWNLOAD_BASE_URL, fname=name)
+            fname = "{fname}.gz".format(fname=name)
+            dst_path = os.path.join(tmp_dir, fname)
+            urllib.urlretrieve(url_data, dst_path, reporthook=_progress)
+            if _calculate_md5_checksum(dst_path) == _get_checksum(name):
+                sys.stdout.write("\n")
+                sys.stdout.flush()
+                logger.info("%s downloaded", name)
+            else:
+                shutil.rmtree(tmp_dir)
+                raise Exception("Checksum comparison failed, try again")
+
+        if os.path.exists(data_folder_dir_tmp):
+            os.remove(data_folder_dir_tmp)
+
+        shutil.move(tmp_dir, data_folder_dir_tmp)
+        os.rename(data_folder_dir_tmp, data_folder_dir)


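For reference, the new Hub branch above boils down to the following standalone sketch. This is illustrative only: it assumes `huggingface_hub` is installed and that a `Gensim/<name>` model repo actually exists on the Hub; `download_from_hub` is a hypothetical helper, not part of the patch, and `BASE_DIR` stands in for gensim's data directory.

import os

from huggingface_hub import snapshot_download

BASE_DIR = os.path.expanduser("~/gensim-data")  # gensim's default data dir


def download_from_hub(name):
    # The PR prefixes model names with the "Gensim/" Hub organization.
    repo_id = "Gensim/" + name
    # snapshot_download fetches every file in the repo and returns the
    # local directory the snapshot was cached in.
    download_dir = snapshot_download(repo_id, cache_dir=BASE_DIR)
    # The patch then renames that snapshot directory to the canonical
    # location gensim.downloader expects for this model.
    target = os.path.join(BASE_DIR, name)
    os.rename(download_dir, target)
    return target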
@@ -432,7 +439,7 @@ def _get_filename(name):
     return information['models'][name]["file_name"]


-def load(name, return_path=False):
+def load(name, return_path=False, from_hf=False):
     """Download (if needed) dataset/model and load it to memory (unless `return_path` is set).

     Parameters
@@ -493,7 +500,7 @@ def load(name, return_path=False):
     folder_dir = os.path.join(BASE_DIR, name)
     path = os.path.join(folder_dir, file_name)
     if not os.path.exists(folder_dir):
-        _download(name)
+        _download(name, from_hf)

     if return_path:
         return path
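With these changes applied, opting into the Hub would look roughly like the sketch below. The `from_hf` flag is what this PR adds; downloading would only work for models mirrored under the `Gensim/` organization on the Hub, and the model name here is just an example.

import gensim.downloader as api

# Fetch from the Hugging Face Hub instead of gensim's default backend;
# requires `pip install huggingface_hub` and a Gensim/<name> repo on the Hub.
model = api.load("glove-wiki-gigaword-50", from_hf=True)

# return_path=True still short-circuits loading and returns the local path.
path = api.load("glove-wiki-gigaword-50", return_path=True, from_hf=True)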