diff --git a/tf_keras/api/golden/v1/tensorflow.keras.datasets.boston_housing.pbtxt b/tf_keras/api/golden/v1/tensorflow.keras.datasets.boston_housing.pbtxt
index bda31751d..36622343e 100644
--- a/tf_keras/api/golden/v1/tensorflow.keras.datasets.boston_housing.pbtxt
+++ b/tf_keras/api/golden/v1/tensorflow.keras.datasets.boston_housing.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.boston_housing"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[\'path\', \'test_split\', \'seed\'], varargs=None, keywords=None, defaults=[\'boston_housing.npz\', \'0.2\', \'113\'], "
+    argspec: "args=[\'path\', \'test_split\', \'seed\', \'cache_dir\'], varargs=None, keywords=None, defaults=[\'boston_housing.npz\', \'0.2\', \'113\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v1/tensorflow.keras.datasets.cifar10.pbtxt b/tf_keras/api/golden/v1/tensorflow.keras.datasets.cifar10.pbtxt
index 8a5142f79..87f25460a 100644
--- a/tf_keras/api/golden/v1/tensorflow.keras.datasets.cifar10.pbtxt
+++ b/tf_keras/api/golden/v1/tensorflow.keras.datasets.cifar10.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.cifar10"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[], varargs=None, keywords=None, defaults=None"
+    argspec: "args=[\'cache_dir\'], varargs=None, keywords=None, defaults=[\'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v1/tensorflow.keras.datasets.cifar100.pbtxt b/tf_keras/api/golden/v1/tensorflow.keras.datasets.cifar100.pbtxt
index 16f184eeb..e7bbe9c91 100644
--- a/tf_keras/api/golden/v1/tensorflow.keras.datasets.cifar100.pbtxt
+++ b/tf_keras/api/golden/v1/tensorflow.keras.datasets.cifar100.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.cifar100"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[\'label_mode\'], varargs=None, keywords=None, defaults=[\'fine\'], "
+    argspec: "args=[\'label_mode\', \'cache_dir\'], varargs=None, keywords=None, defaults=[\'fine\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v1/tensorflow.keras.datasets.fashion_mnist.pbtxt b/tf_keras/api/golden/v1/tensorflow.keras.datasets.fashion_mnist.pbtxt
index a0e14356f..4b5bcac2d 100644
--- a/tf_keras/api/golden/v1/tensorflow.keras.datasets.fashion_mnist.pbtxt
+++ b/tf_keras/api/golden/v1/tensorflow.keras.datasets.fashion_mnist.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.fashion_mnist"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[], varargs=None, keywords=None, defaults=None"
+    argspec: "args=[\'cache_dir\'], varargs=None, keywords=None, defaults=[\'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v1/tensorflow.keras.datasets.imdb.pbtxt b/tf_keras/api/golden/v1/tensorflow.keras.datasets.imdb.pbtxt
index ff962876b..362b0ef2b 100644
--- a/tf_keras/api/golden/v1/tensorflow.keras.datasets.imdb.pbtxt
+++ b/tf_keras/api/golden/v1/tensorflow.keras.datasets.imdb.pbtxt
@@ -6,6 +6,6 @@ tf_module {
   }
   member_method {
     name: "load_data"
-    argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'seed\', \'start_char\', \'oov_char\', \'index_from\'], varargs=None, keywords=kwargs, defaults=[\'imdb.npz\', \'None\', \'0\', \'None\', \'113\', \'1\', \'2\', \'3\'], "
+    argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'seed\', \'start_char\', \'oov_char\', \'index_from\', \'cache_dir\'], varargs=None, keywords=kwargs, defaults=[\'imdb.npz\', \'None\', \'0\', \'None\', \'113\', \'1\', \'2\', \'3\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v1/tensorflow.keras.datasets.mnist.pbtxt b/tf_keras/api/golden/v1/tensorflow.keras.datasets.mnist.pbtxt
index 530bb0755..6efe0c41d 100644
--- a/tf_keras/api/golden/v1/tensorflow.keras.datasets.mnist.pbtxt
+++ b/tf_keras/api/golden/v1/tensorflow.keras.datasets.mnist.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.mnist"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=[\'mnist.npz\'], "
+    argspec: "args=[\'path\', \'cache_dir\'], varargs=None, keywords=None, defaults=[\'mnist.npz\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v1/tensorflow.keras.datasets.reuters.pbtxt b/tf_keras/api/golden/v1/tensorflow.keras.datasets.reuters.pbtxt
index 6f6446eb4..dc19041d9 100644
--- a/tf_keras/api/golden/v1/tensorflow.keras.datasets.reuters.pbtxt
+++ b/tf_keras/api/golden/v1/tensorflow.keras.datasets.reuters.pbtxt
@@ -10,6 +10,6 @@ tf_module {
   }
   member_method {
     name: "load_data"
-    argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'test_split\', \'seed\', \'start_char\', \'oov_char\', \'index_from\'], varargs=None, keywords=kwargs, defaults=[\'reuters.npz\', \'None\', \'0\', \'None\', \'0.2\', \'113\', \'1\', \'2\', \'3\'], "
+    argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'test_split\', \'seed\', \'start_char\', \'oov_char\', \'index_from\', \'cache_dir\'], varargs=None, keywords=kwargs, defaults=[\'reuters.npz\', \'None\', \'0\', \'None\', \'0.2\', \'113\', \'1\', \'2\', \'3\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v2/tensorflow.keras.datasets.boston_housing.pbtxt b/tf_keras/api/golden/v2/tensorflow.keras.datasets.boston_housing.pbtxt
index bda31751d..36622343e 100644
--- a/tf_keras/api/golden/v2/tensorflow.keras.datasets.boston_housing.pbtxt
+++ b/tf_keras/api/golden/v2/tensorflow.keras.datasets.boston_housing.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.boston_housing"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[\'path\', \'test_split\', \'seed\'], varargs=None, keywords=None, defaults=[\'boston_housing.npz\', \'0.2\', \'113\'], "
+    argspec: "args=[\'path\', \'test_split\', \'seed\', \'cache_dir\'], varargs=None, keywords=None, defaults=[\'boston_housing.npz\', \'0.2\', \'113\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v2/tensorflow.keras.datasets.cifar10.pbtxt b/tf_keras/api/golden/v2/tensorflow.keras.datasets.cifar10.pbtxt
index 8a5142f79..87f25460a 100644
--- a/tf_keras/api/golden/v2/tensorflow.keras.datasets.cifar10.pbtxt
+++ b/tf_keras/api/golden/v2/tensorflow.keras.datasets.cifar10.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.cifar10"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[], varargs=None, keywords=None, defaults=None"
+    argspec: "args=[\'cache_dir\'], varargs=None, keywords=None, defaults=[\'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v2/tensorflow.keras.datasets.cifar100.pbtxt b/tf_keras/api/golden/v2/tensorflow.keras.datasets.cifar100.pbtxt
index 16f184eeb..e7bbe9c91 100644
--- a/tf_keras/api/golden/v2/tensorflow.keras.datasets.cifar100.pbtxt
+++ b/tf_keras/api/golden/v2/tensorflow.keras.datasets.cifar100.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.cifar100"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[\'label_mode\'], varargs=None, keywords=None, defaults=[\'fine\'], "
+    argspec: "args=[\'label_mode\', \'cache_dir\'], varargs=None, keywords=None, defaults=[\'fine\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v2/tensorflow.keras.datasets.fashion_mnist.pbtxt b/tf_keras/api/golden/v2/tensorflow.keras.datasets.fashion_mnist.pbtxt
index a0e14356f..4b5bcac2d 100644
--- a/tf_keras/api/golden/v2/tensorflow.keras.datasets.fashion_mnist.pbtxt
+++ b/tf_keras/api/golden/v2/tensorflow.keras.datasets.fashion_mnist.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.fashion_mnist"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[], varargs=None, keywords=None, defaults=None"
+    argspec: "args=[\'cache_dir\'], varargs=None, keywords=None, defaults=[\'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v2/tensorflow.keras.datasets.imdb.pbtxt b/tf_keras/api/golden/v2/tensorflow.keras.datasets.imdb.pbtxt
index ff962876b..362b0ef2b 100644
--- a/tf_keras/api/golden/v2/tensorflow.keras.datasets.imdb.pbtxt
+++ b/tf_keras/api/golden/v2/tensorflow.keras.datasets.imdb.pbtxt
@@ -6,6 +6,6 @@ tf_module {
   }
   member_method {
     name: "load_data"
-    argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'seed\', \'start_char\', \'oov_char\', \'index_from\'], varargs=None, keywords=kwargs, defaults=[\'imdb.npz\', \'None\', \'0\', \'None\', \'113\', \'1\', \'2\', \'3\'], "
+    argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'seed\', \'start_char\', \'oov_char\', \'index_from\', \'cache_dir\'], varargs=None, keywords=kwargs, defaults=[\'imdb.npz\', \'None\', \'0\', \'None\', \'113\', \'1\', \'2\', \'3\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v2/tensorflow.keras.datasets.mnist.pbtxt b/tf_keras/api/golden/v2/tensorflow.keras.datasets.mnist.pbtxt
index 530bb0755..6efe0c41d 100644
--- a/tf_keras/api/golden/v2/tensorflow.keras.datasets.mnist.pbtxt
+++ b/tf_keras/api/golden/v2/tensorflow.keras.datasets.mnist.pbtxt
@@ -2,6 +2,6 @@ path: "tensorflow.keras.datasets.mnist"
 tf_module {
   member_method {
     name: "load_data"
-    argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=[\'mnist.npz\'], "
+    argspec: "args=[\'path\', \'cache_dir\'], varargs=None, keywords=None, defaults=[\'mnist.npz\', \'None\'], "
   }
 }
diff --git a/tf_keras/api/golden/v2/tensorflow.keras.datasets.reuters.pbtxt b/tf_keras/api/golden/v2/tensorflow.keras.datasets.reuters.pbtxt
index 6f6446eb4..dc19041d9 100644
--- a/tf_keras/api/golden/v2/tensorflow.keras.datasets.reuters.pbtxt
+++ b/tf_keras/api/golden/v2/tensorflow.keras.datasets.reuters.pbtxt
@@ -10,6 +10,6 @@ tf_module {
   }
   member_method {
     name: "load_data"
-    argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'test_split\', \'seed\', \'start_char\', \'oov_char\', \'index_from\'], varargs=None, keywords=kwargs, defaults=[\'reuters.npz\', \'None\', \'0\', \'None\', \'0.2\', \'113\', \'1\', \'2\', \'3\'], "
+    argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'test_split\', \'seed\', \'start_char\', \'oov_char\', \'index_from\', \'cache_dir\'], varargs=None, keywords=kwargs, defaults=[\'reuters.npz\', \'None\', \'0\', \'None\', \'0.2\', \'113\', \'1\', \'2\', \'3\', \'None\'], "
   }
 }
diff --git a/tf_keras/datasets/boston_housing.py b/tf_keras/datasets/boston_housing.py
index dab2c1c4d..482cdb1cd 100644
--- a/tf_keras/datasets/boston_housing.py
+++ b/tf_keras/datasets/boston_housing.py
@@ -14,6 +14,8 @@
 # ==============================================================================
 """Boston housing price regression dataset."""
 
+import os
+
 import numpy as np
 
 from tf_keras.utils.data_utils import get_file
@@ -23,7 +25,9 @@
 
 
 @keras_export("keras.datasets.boston_housing.load_data")
-def load_data(path="boston_housing.npz", test_split=0.2, seed=113):
+def load_data(
+    path="boston_housing.npz", test_split=0.2, seed=113, cache_dir=None
+):
     """Loads the Boston Housing dataset.
 
     This is a dataset taken from the StatLib library which is maintained at
@@ -43,11 +47,12 @@ def load_data(path="boston_housing.npz", test_split=0.2, seed=113):
     [StatLib website](http://lib.stat.cmu.edu/datasets/boston).
 
     Args:
-        path: path where to cache the dataset locally
-            (relative to `~/.keras/datasets`).
+        path: path where to cache the dataset locally (relative to
+            `~/.keras/datasets`).
         test_split: fraction of the data to reserve as test set.
-        seed: Random seed for shuffling the data
-            before computing the test split.
+        seed: Random seed for shuffling the data before computing the test split.
+        cache_dir: directory where to cache the dataset locally. When None,
+            defaults to `~/.keras/datasets`.
 
     Returns:
         Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
@@ -64,12 +69,16 @@ def load_data(path="boston_housing.npz", test_split=0.2, seed=113):
     origin_folder = (
         "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
     )
+    if cache_dir:
+        cache_dir = os.path.expanduser(cache_dir)
+        os.makedirs(cache_dir, exist_ok=True)
     path = get_file(
         path,
         origin=origin_folder + "boston_housing.npz",
         file_hash=(  # noqa: E501
             "f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5"
         ),
+        cache_dir=cache_dir,
     )
     with np.load(path, allow_pickle=True) as f:
         x = f["x"]
diff --git a/tf_keras/datasets/cifar10.py b/tf_keras/datasets/cifar10.py
index daab65443..5fd235b95 100644
--- a/tf_keras/datasets/cifar10.py
+++ b/tf_keras/datasets/cifar10.py
@@ -27,7 +27,7 @@
 
 
 @keras_export("keras.datasets.cifar10.load_data")
-def load_data():
+def load_data(cache_dir=None):
     """Loads the CIFAR10 dataset.
 
     This is a dataset of 50,000 32x32 color training images and 10,000 test
@@ -49,6 +49,10 @@ def load_data():
     | 8 | ship |
     | 9 | truck |
 
+    Args:
+        cache_dir: directory where to cache the dataset locally. When None,
+            defaults to `~/.keras/datasets`.
+
     Returns:
         Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
 
@@ -78,6 +82,9 @@ def load_data():
     """
     dirname = "cifar-10-batches-py"
    origin = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
+    if cache_dir:
+        cache_dir = os.path.expanduser(cache_dir)
+        os.makedirs(cache_dir, exist_ok=True)
     path = get_file(
         dirname,
         origin=origin,
@@ -85,6 +92,7 @@
         file_hash=(  # noqa: E501
             "6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
         ),
+        cache_dir=cache_dir,
     )
 
     num_train_samples = 50000
diff --git a/tf_keras/datasets/cifar100.py b/tf_keras/datasets/cifar100.py
index af00173d1..8a584a2f8 100644
--- a/tf_keras/datasets/cifar100.py
+++ b/tf_keras/datasets/cifar100.py
@@ -27,7 +27,7 @@
 
 
 @keras_export("keras.datasets.cifar100.load_data")
-def load_data(label_mode="fine"):
+def load_data(label_mode="fine", cache_dir=None):
     """Loads the CIFAR100 dataset.
 
     This is a dataset of 50,000 32x32 color training images and
@@ -39,6 +39,8 @@ def load_data(label_mode="fine"):
         label_mode: one of "fine", "coarse". If it is "fine" the category
             labels are the fine-grained labels, if it is "coarse" the output
             labels are the coarse-grained superclasses.
+        cache_dir: directory where to cache the dataset locally. When None,
+            defaults to `~/.keras/datasets`.
 
     Returns:
         Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
@@ -75,6 +77,9 @@ def load_data(label_mode="fine"):
 
     dirname = "cifar-100-python"
     origin = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
+    if cache_dir:
+        cache_dir = os.path.expanduser(cache_dir)
+        os.makedirs(cache_dir, exist_ok=True)
     path = get_file(
         dirname,
         origin=origin,
@@ -82,6 +87,7 @@
         file_hash=(  # noqa: E501
             "85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
         ),
+        cache_dir=cache_dir,
     )
 
     fpath = os.path.join(path, "train")
diff --git a/tf_keras/datasets/fashion_mnist.py b/tf_keras/datasets/fashion_mnist.py
index a3040cea5..b3f287e4a 100644
--- a/tf_keras/datasets/fashion_mnist.py
+++ b/tf_keras/datasets/fashion_mnist.py
@@ -26,7 +26,7 @@
 
 
 @keras_export("keras.datasets.fashion_mnist.load_data")
-def load_data():
+def load_data(cache_dir=None):
     """Loads the Fashion-MNIST dataset.
 
     This is a dataset of 60,000 28x28 grayscale images of 10 fashion categories,
@@ -48,6 +48,10 @@ def load_data():
     | 8 | Bag |
     | 9 | Ankle boot |
 
+    Args:
+        cache_dir: directory where to cache the dataset locally. When None,
+            defaults to `~/.keras/datasets`.
+
     Returns:
         Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
 
@@ -77,7 +81,6 @@ def load_data():
     The copyright for Fashion-MNIST is held by Zalando SE.
     Fashion-MNIST is licensed under the [MIT license](
     https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
-
     """
     dirname = os.path.join("datasets", "fashion-mnist")
     base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
@@ -87,10 +90,19 @@ def load_data():
         "t10k-labels-idx1-ubyte.gz",
         "t10k-images-idx3-ubyte.gz",
     ]
-
+    if cache_dir:
+        cache_dir = os.path.expanduser(cache_dir)
+        os.makedirs(cache_dir, exist_ok=True)
     paths = []
     for fname in files:
-        paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))
+        paths.append(
+            get_file(
+                fname,
+                origin=base + fname,
+                cache_dir=cache_dir,
+                cache_subdir=dirname,
+            )
+        )
 
     with gzip.open(paths[0], "rb") as lbpath:
         y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
diff --git a/tf_keras/datasets/imdb.py b/tf_keras/datasets/imdb.py
index 2810fe20b..f60c7a9c1 100644
--- a/tf_keras/datasets/imdb.py
+++ b/tf_keras/datasets/imdb.py
@@ -15,6 +15,7 @@
 """IMDB sentiment classification dataset."""
 
 import json
+import os
 
 import numpy as np
 
@@ -36,6 +37,7 @@ def load_data(
     start_char=1,
     oov_char=2,
     index_from=3,
+    cache_dir=None,
     **kwargs,
 ):
     """Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/).
@@ -73,6 +75,8 @@ def load_data(
             Words that were cut out because of the `num_words` or
             `skip_top` limits will be replaced with this character.
         index_from: int. Index actual words with this index and higher.
+        cache_dir: directory where to cache the dataset locally. When None,
+            defaults to `~/.keras/datasets`.
         **kwargs: Used for backwards compatibility.
 
     Returns:
@@ -108,12 +112,16 @@ def load_data(
     origin_folder = (
         "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
     )
+    if cache_dir:
+        cache_dir = os.path.expanduser(cache_dir)
+        os.makedirs(cache_dir, exist_ok=True)
     path = get_file(
         path,
         origin=origin_folder + "imdb.npz",
         file_hash=(  # noqa: E501
             "69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f"
         ),
+        cache_dir=cache_dir,
     )
     with np.load(path, allow_pickle=True) as f:
         x_train, labels_train = f["x_train"], f["y_train"]
diff --git a/tf_keras/datasets/mnist.py b/tf_keras/datasets/mnist.py
index d98b7cb98..9c5e01114 100644
--- a/tf_keras/datasets/mnist.py
+++ b/tf_keras/datasets/mnist.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ==============================================================================
 """MNIST handwritten digits dataset."""
+import os
 
 import numpy as np
 
@@ -23,7 +24,7 @@
 
 
 @keras_export("keras.datasets.mnist.load_data")
-def load_data(path="mnist.npz"):
+def load_data(path="mnist.npz", cache_dir=None):
     """Loads the MNIST dataset.
 
     This is a dataset of 60,000 28x28 grayscale images of the 10 digits,
@@ -32,8 +33,9 @@ def load_data(path="mnist.npz"):
     [MNIST homepage](http://yann.lecun.com/exdb/mnist/).
 
     Args:
-        path: path where to cache the dataset locally
-            (relative to `~/.keras/datasets`).
+        path: path where to cache the dataset locally relative to cache_dir.
+        cache_dir: directory where to cache the dataset locally. When None,
+            defaults to `~/.keras/datasets`.
 
     Returns:
         Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
@@ -72,12 +74,16 @@ def load_data(path="mnist.npz"):
     origin_folder = (
         "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
     )
+    if cache_dir:
+        cache_dir = os.path.expanduser(cache_dir)
+        os.makedirs(cache_dir, exist_ok=True)
     path = get_file(
         path,
         origin=origin_folder + "mnist.npz",
         file_hash=(  # noqa: E501
             "731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1"
         ),
+        cache_dir=cache_dir,
     )
     with np.load(path, allow_pickle=True) as f:
         x_train, y_train = f["x_train"], f["y_train"]
diff --git a/tf_keras/datasets/reuters.py b/tf_keras/datasets/reuters.py
index e0b7ce480..416b1ce03 100644
--- a/tf_keras/datasets/reuters.py
+++ b/tf_keras/datasets/reuters.py
@@ -15,6 +15,7 @@
 """Reuters topic classification dataset."""
 
 import json
+import os
 
 import numpy as np
 
@@ -37,6 +38,7 @@ def load_data(
     start_char=1,
     oov_char=2,
     index_from=3,
+    cache_dir=None,
     **kwargs,
 ):
     """Loads the Reuters newswire classification dataset.
@@ -83,6 +85,8 @@ def load_data(
             Words that were cut out because of the `num_words` or
             `skip_top` limits will be replaced with this character.
         index_from: int. Index actual words with this index and higher.
+        cache_dir: directory where to cache the dataset locally. When None,
+            defaults to `~/.keras/datasets`.
         **kwargs: Used for backwards compatibility.
 
     Returns:
@@ -114,12 +118,16 @@ def load_data(
     origin_folder = (
         "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    )
+    if cache_dir:
+        cache_dir = os.path.expanduser(cache_dir)
+        os.makedirs(cache_dir, exist_ok=True)
     path = get_file(
         path,
         origin=origin_folder + "reuters.npz",
         file_hash=(  # noqa: E501
             "d6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916"
         ),
+        cache_dir=cache_dir,
     )
     with np.load(path, allow_pickle=True) as f:
         xs, labels = f["x"], f["y"]
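
Usage sketch: every `load_data` above gains an optional `cache_dir` that is
expanded with `os.path.expanduser`, created if missing, and passed through to
`tf_keras.utils.data_utils.get_file`. A minimal example, assuming only the
behavior shown in the diff (`/tmp/keras_cache` is an arbitrary placeholder
path, not part of this change):

    import os

    from tf_keras.datasets import cifar10, mnist

    # Arbitrary example directory; any writable path works.
    custom_cache = "/tmp/keras_cache"

    # The archive now lands under the new root. get_file's default
    # cache_subdir ("datasets") still applies, so the file ends up at
    # /tmp/keras_cache/datasets/mnist.npz instead of ~/.keras/datasets.
    (x_train, y_train), (x_test, y_test) = mnist.load_data(
        cache_dir=custom_cache
    )
    assert os.path.exists(
        os.path.join(custom_cache, "datasets", "mnist.npz")
    )

    # Backwards compatible: omitting cache_dir (or passing None) keeps
    # the old ~/.keras/datasets location.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

Because `cache_dir` is threaded straight through to `get_file`, the existing
`cache_subdir` layout (`datasets/`, or `datasets/fashion-mnist` for
Fashion-MNIST) is preserved under the new root.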