diff --git a/.dockerignore b/.dockerignore
index c937bf0..69a2baf 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,6 +1,8 @@
 __pycache__
 *.py[cod]
+assets
+
 
 *.pth
 *.pb
 *.pkl
diff --git a/Dockerfile.cpu b/Dockerfile.cpu
index 74c4604..6398766 100644
--- a/Dockerfile.cpu
+++ b/Dockerfile.cpu
@@ -11,7 +11,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins
 COPY requirements.txt .
 
 RUN python3 -m venv /opt/venv && \
-    python3 -m pip install pip==19.2.3 pip-tools==4.0.0
+    python3 -m pip install pip==19.2.3 pip-tools==4.0.0 setuptools==41.4.0
 
 RUN echo "https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl \
     --hash=sha256:ce648bb0c6b86dd99a8b5598ae6362a066cca8de69ad089cd206ace3bdec0a5f \
diff --git a/Dockerfile.gpu b/Dockerfile.gpu
index 6aa75ce..f30e61a 100644
--- a/Dockerfile.gpu
+++ b/Dockerfile.gpu
@@ -11,7 +11,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins
 COPY requirements.txt .
 
 RUN python3 -m venv /opt/venv && \
-    python3 -m pip install pip==19.2.3 pip-tools==4.0.0
+    python3 -m pip install pip==19.2.3 pip-tools==4.0.0 setuptools==41.4.0
 
 RUN echo "https://download.pytorch.org/whl/cu100/torch-1.3.0%2Bcu100-cp36-cp36m-linux_x86_64.whl \
     --hash=sha256:2414744c5f9fc25e4ee181019df188b0ea28c7866ce7af13116c4d7e538460b7 \
diff --git a/ig65m/attention.py b/ig65m/attention.py
new file mode 100644
index 0000000..e76d2bc
--- /dev/null
+++ b/ig65m/attention.py
@@ -0,0 +1,109 @@
+import torch
+import torch.nn as nn
+
+from einops import rearrange
+
+
+# Attention: start with this paper
+# https://arxiv.org/abs/1904.11492
+
+
+class SelfAttention3d(nn.Module):
+    def __init__(self, planes):
+        super().__init__()
+
+        # Note: ratios below should be made configurable
+
+        self.q = nn.Conv3d(planes, planes // 8, kernel_size=1, bias=False)
+        self.k = nn.Conv3d(planes, planes // 8, kernel_size=1, bias=False)
+        self.v = nn.Conv3d(planes, planes // 2, kernel_size=1, bias=False)
+        self.z = nn.Conv3d(planes // 2, planes, kernel_size=1, bias=False)
+
+        self.y = nn.Parameter(torch.tensor(0.))
+
+    def forward(self, x):
+        q = self.q(x)
+        k = self.k(x)
+        v = self.v(x)
+
+        # Note: pooling below should be made configurable
+
+        k = nn.functional.max_pool3d(k, (2, 2, 2))
+        v = nn.functional.max_pool3d(v, (2, 2, 2))
+
+        q = rearrange(q, "n c t h w -> n (t h w) c")
+        k = rearrange(k, "n c t h w -> n c (t h w)")
+        v = rearrange(v, "n c t h w -> n c (t h w)")
+
+        beta = torch.bmm(q, k)
+        beta = torch.softmax(beta, dim=-1)
+        beta = rearrange(beta, "n thw c -> n c thw")
+
+        att = torch.bmm(v, beta)
+        att = rearrange(att, "n c (t h w) -> n c t h w",
+                        t=x.size(2), h=x.size(3), w=x.size(4))
+
+        return self.y * self.z(att) + x
+
+
+class SimpleSelfAttention3d(nn.Module):
+    def __init__(self, planes):
+        super().__init__()
+
+        self.k = nn.Conv3d(planes, 1, kernel_size=1, bias=False)
+        self.v = nn.Conv3d(planes, planes, kernel_size=1, bias=False)
+
+        self.y = nn.Parameter(torch.tensor(0.))
+
+    def forward(self, x):
+        k = self.k(x)
+        k = rearrange(k, "n c t h w -> n (t h w) c")
+        k = torch.softmax(k, dim=1)  # attend over the (t h w) positions; dim=-1 is a singleton here
+
+        xx = rearrange(x, "n c t h w -> n c (t h w)")
+
+        ctx = torch.bmm(xx, k)
+        ctx = rearrange(ctx, "n c () -> n c () () ()")
+
+        att = self.v(ctx)
+
+        return self.y * att + x
+
+
+class GlobalContext3d(nn.Module):
+    def __init__(self, planes):
+        super().__init__()
+
+        self.k = nn.Conv3d(planes, 1, kernel_size=1, bias=False)
+
+        # Note: ratios below should be made configurable
+
+        self.v = nn.Sequential(
+            nn.Conv3d(planes, planes // 8, kernel_size=1, bias=False),
+            nn.LayerNorm((planes // 8, 1, 1, 1)),
+            nn.ReLU(inplace=True),
+            nn.Conv3d(planes // 8, planes, kernel_size=1, bias=False))
+
+        self.y = nn.Parameter(torch.tensor(0.))
+
+    def forward(self, x):
+        k = self.k(x)
+        k = rearrange(k, "n c t h w -> n (t h w) c")
+        k = torch.softmax(k, dim=1)  # attend over the (t h w) positions; dim=-1 is a singleton here
+
+        xx = rearrange(x, "n c t h w -> n c (t h w)")
+
+        ctx = torch.bmm(xx, k)
+        ctx = rearrange(ctx, "n c () -> n c () () ()")
+
+        att = self.v(ctx)
+
+        return self.y * att + x
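+
+
+# Minimal usage sketch (illustrative only): all three blocks are residual
+# and start out as the identity mapping, since the gate y is zero-initialized.
+#
+#   block = SelfAttention3d(64)        # planes must be divisible by 8
+#   x = torch.rand(2, 64, 8, 16, 16)   # (n, c, t, h, w); even t/h/w for the pooling
+#   assert block(x).shape == x.shape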
diff --git a/ig65m/cli/__main__.py b/ig65m/cli/__main__.py
index 43ece28..382281f 100644
--- a/ig65m/cli/__main__.py
+++ b/ig65m/cli/__main__.py
@@ -5,6 +5,7 @@
 import ig65m.cli.extract
 import ig65m.cli.semcode
 import ig65m.cli.dreamer
+import ig65m.cli.vgan
 
 
 parser = argparse.ArgumentParser(prog="ig65m")
@@ -50,5 +51,17 @@
 dreamer.set_defaults(main=ig65m.cli.dreamer.main)
 
 
+vgan = subcmd.add_parser("vgan", help="🥑 video generative adversarial network", formatter_class=Formatter)
+vgan.add_argument("videos", type=Path, help="directory to read videos from")
+vgan.add_argument("--checkpoints", type=Path, required=True, help="directory to save checkpoints to")
+vgan.add_argument("--num-epochs", type=int, default=100, help="number of epochs to run through dataset")
+vgan.add_argument("--batch-size", type=int, default=1, help="number of clips per batch")
+vgan.add_argument("--clip-length", type=int, default=32, help="number of frames per clip")
+vgan.add_argument("--z-dimension", type=int, default=128, help="noise dimensionality")
+vgan.add_argument("--save-frequency", type=int, default=100, help="number of steps to checkpoint after")
+vgan.add_argument("--logs", type=Path, required=True, help="directory to save TensorBoard logs to")
+vgan.set_defaults(main=ig65m.cli.vgan.main)
+
+
 args = parser.parse_args()
 args.main(args)
diff --git a/ig65m/cli/vgan.py b/ig65m/cli/vgan.py
new file mode 100755
index 0000000..aab461b
--- /dev/null
+++ b/ig65m/cli/vgan.py
@@ -0,0 +1,166 @@
+import sys
+
+import torch
+import torch.nn as nn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard import SummaryWriter
+
+from torchvision.transforms import Compose
+
+from einops import rearrange
+from einops.layers.torch import Rearrange
+
+from ig65m.datasets import VideoDirectoryDataset
+from ig65m.transforms import ToTensor, Resize, CenterCrop, Normalize, Denormalize
+from ig65m.losses import GeneratorHingeLoss, DiscriminatorHingeLoss
+from ig65m.gan import Generator, Discriminator
+
+
+def main(args):
+    if torch.cuda.is_available():
+        print("🐎 Running on GPU(s)", file=sys.stderr)
+        device = torch.device("cuda")
+        torch.backends.cudnn.benchmark = True
+    else:
+        print("🐌 Running on CPU(s)", file=sys.stderr)
+        device = torch.device("cpu")
+
+    mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
+
+    transform = Compose([
+        ToTensor(),
+        Rearrange("t h w c -> c t h w"),
+        # Resize(48),
+        CenterCrop(32),
+        Normalize(mean=mean, std=std),
+    ])
+
+    denormalize = Denormalize(mean=mean, std=std)
+
+    dataset = VideoDirectoryDataset(args.videos, clip_length=args.clip_length, transform=transform)
+    loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=0)
+
+    args.checkpoints.mkdir(exist_ok=True)
+
+    g = Generator(args.z_dimension)
+    g = g.to(device)
+    g = nn.DataParallel(g)
+
+    d = Discriminator()
+    d = d.to(device)
+    d = nn.DataParallel(d)
+
+    lr_g = 1e-4 * 1
+    lr_d = 1e-4 * 4
+
+    opt_g = torch.optim.Adam([p for p in g.parameters() if p.requires_grad],
+                             lr=lr_g, betas=(0, 0.9))
+
+    opt_d = torch.optim.Adam([p for p in d.parameters() if p.requires_grad],
+                             lr=lr_d, betas=(0, 0.9))
+
+    crit_g = GeneratorHingeLoss()
+    crit_d = DiscriminatorHingeLoss()
+
+    zfix = torch.randn(1, args.z_dimension, device=device).clamp_(0)
+
+    step = 0
+
+    with SummaryWriter(str(args.logs)) as summary:
+        for _ in range(args.num_epochs):
+            for inputs in loader:
+                adjust_learning_rate(opt_g, step, lr_g)
+                adjust_learning_rate(opt_d, step, lr_d)
+
+                # Step D
+
+                g.zero_grad()
+                d.zero_grad()
+
+                z = torch.randn(inputs.size(0), args.z_dimension, device=device).clamp_(0)
+
+                real_data = inputs.to(device)
+                fake_data = g(z)
+
+                real_out = d(real_data)
+                fake_out = d(fake_data)
+
+                loss_d_real, loss_d_fake = crit_d(real_out, fake_out)
+                loss_d = loss_d_real.mean() + loss_d_fake.mean()
+                loss_d.backward()
+
+                opt_d.step()
+
+                # Step G
+
+                g.zero_grad()
+                d.zero_grad()
+
+                z = torch.randn(inputs.size(0), args.z_dimension, device=device).clamp_(0)
+
+                fake_data = g(z)
+                fake_out = d(fake_data)
+
+                loss_g = crit_g(fake_out)
+                loss_g.backward()
+
+                opt_g.step()
+
+                # Done
+
+                summary.add_scalar("Loss/Discriminator/Real", loss_d_real.item(), step)
+                summary.add_scalar("Loss/Discriminator/Fake", loss_d_fake.item(), step)
+                summary.add_scalar("Loss/Generator", loss_g.item(), step)
+
+                if step % args.save_frequency == 0:
+                    real_data = inputs
+                    real_clip = denormalize(real_data[0])
+                    real_images = rearrange(real_clip, "c t h w -> t c h w")
+
+                    summary.add_images("Images/Real", real_images, step)
+
+                    with torch.no_grad():
+                        for m in g.modules():
+                            if isinstance(m, nn.BatchNorm3d):
+                                m.eval()
+
+                        fake_data = g(zfix)
+
+                        for m in g.modules():
+                            if isinstance(m, nn.BatchNorm3d):
+                                m.train()
+
+                        fake_clip = denormalize(fake_data[0])
+                        fake_images = rearrange(fake_clip, "c t h w -> t c h w")
+
+                        summary.add_images("Images/Fake", fake_images, step)
+
+                    state = {"step": step,
+                             "g_state_dict": g.state_dict(), "d_state_dict": d.state_dict(),
+                             "g_opt": opt_g.state_dict(), "d_opt": opt_d.state_dict()}
+
+                    torch.save(state, args.checkpoints / "state-{:010d}.pth".format(step))
+
+                step += 1
+
+    print("🥑 Done", file=sys.stderr)
+
+
+# https://arxiv.org/abs/1706.02677
+def adjust_learning_rate(optimizer, step, lr):
+    warmup = 1000
+    base = 0.01 * lr
+
+    def lerp(c, first, last):
+        return first + c * (last - first)
+
+    if step <= warmup:
+        lr = lerp(step / warmup, base, lr)
+
+    for param_group in optimizer.param_groups:
+        param_group["lr"] = lr
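+
+
+# Worked example for the warmup above, assuming warmup=1000 and lr=1e-4 (so
+# base=1e-6): step 0 runs at 1e-6, step 500 roughly halfway at
+# 1e-6 + 0.5 * (1e-4 - 1e-6) = 5.05e-5, and step >= 1000 at the full 1e-4.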
diff --git a/ig65m/datasets.py b/ig65m/datasets.py
index 7facf79..7b63cdf 100644
--- a/ig65m/datasets.py
+++ b/ig65m/datasets.py
@@ -1,4 +1,6 @@
 import math
+import random
+import itertools
 
 from torch.utils.data import IterableDataset, get_worker_info
 
@@ -10,7 +12,7 @@ def __init__(self, video, first, last):
         assert first <= last
 
         for i in range(first):
-            ret, _ = video.read()
+            ret = video.grab()
 
             if not ret:
                 raise RuntimeError("seeking to frame at index {} failed".format(i))
@@ -20,7 +22,7 @@ def __init__(self, video, first, last):
         self.last = last
 
     def __next__(self):
-        if self.it >= self.last or not self.video.isOpened():
+        if self.it >= self.last:
            raise StopIteration
 
         ok, frame = self.video.read()
@@ -57,11 +59,11 @@ def __next__(self):
 
 
 class VideoDataset(IterableDataset):
-    def __init__(self, path, clip, transform=None):
+    def __init__(self, path, clip_length, transform=None):
         super().__init__()
 
         self.path = path
-        self.clip = clip
+        self.clip_length = clip_length
         self.transform = transform
 
         video = cv2.VideoCapture(str(path))
@@ -72,7 +74,7 @@ def __init__(self, path, clip_length, transform=None):
         self.last = frames
 
     def __len__(self):
-        return self.last // self.clip
+        return self.last // self.clip_length
 
     def __iter__(self):
         info = get_worker_info()
@@ -95,14 +97,14 @@ def __iter__(self):
         else:
             fn = lambda v: v  # noqa: E731
 
-        return TransformedRange(BatchedRange(rng, self.clip), fn)
+        return TransformedRange(BatchedRange(rng, self.clip_length), fn)
 
 
 class WebcamDataset(IterableDataset):
-    def __init__(self, clip, transform=None):
+    def __init__(self, clip_length, transform=None):
         super().__init__()
 
-        self.clip = clip
+        self.clip_length = clip_length
         self.transform = transform
 
         self.video = cv2.VideoCapture(0)
@@ -120,4 +122,33 @@ def __iter__(self):
         else:
             fn = lambda v: v  # noqa: E731
 
-        return TransformedRange(BatchedRange(rng, self.clip), fn)
+        return TransformedRange(BatchedRange(rng, self.clip_length), fn)
+
+
+class VideoDirectoryDataset(IterableDataset):
+    def __init__(self, path, clip_length, transform=None):
+        super().__init__()
+
+        self.clip_length = clip_length
+        self.transform = transform
+
+        paths = [p for p in path.iterdir() if p.is_file()]
+
+        self.videos = [VideoDataset(p, clip_length, transform) for p in paths]
+        self.total_clips = sum(len(v) for v in self.videos)
+
+    def __iter__(self):
+        info = get_worker_info()
+
+        if info is not None:
+            raise RuntimeError("multiple workers not supported in VideoDirectoryDataset")
+
+        random.shuffle(self.videos)
+
+        return itertools.chain(*self.videos)
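+
+
+# Hypothetical usage sketch; the directory name and clip length are made up:
+#
+#   dataset = VideoDirectoryDataset(Path("videos"), clip_length=32)
+#   loader = DataLoader(dataset, batch_size=1, num_workers=0)  # workers > 0 raise above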
diff --git a/ig65m/gan.py b/ig65m/gan.py
new file mode 100644
index 0000000..81d0b3f
--- /dev/null
+++ b/ig65m/gan.py
@@ -0,0 +1,159 @@
+import torch
+import torch.nn as nn
+
+from einops.layers.torch import Rearrange
+
+from ig65m.upsample import Upsample3d
+from ig65m.attention import GlobalContext3d
+from ig65m.pool import AdaptiveSumPool3d
+
+
+# BigGan building blocks; see paper
+# https://arxiv.org/abs/1809.11096
+
+
+class Generator(nn.Module):
+    def __init__(self, z):
+        super().__init__()
+
+        ch = 32
+        T, H, W = 2, 2, 2
+
+        self.project = nn.Sequential(
+            nn.Linear(z, 16 * ch * T * H * W),
+            Rearrange("n (c t h w) -> n c t h w", t=T, h=H, w=W))
+
+        self.layer1 = ResBlockUp(16 * ch, 8 * ch)
+        self.layer2 = ResBlockUp(8 * ch, 4 * ch)
+        self.layer3 = ResBlockUp(4 * ch, 2 * ch)
+        self.layer4 = ResBlockUp(2 * ch, ch)
+
+        self.ctx1 = GlobalContext3d(4 * ch)
+        self.ctx2 = GlobalContext3d(2 * ch)
+
+        self.final = nn.Sequential(
+            nn.BatchNorm3d(ch),
+            nn.ReLU(inplace=True),
+            nn.Conv3d(ch, 3, kernel_size=3, padding=1, bias=True))
+
+        for m in self.modules():
+            if isinstance(m, (nn.Conv3d, nn.Linear)):
+                nn.utils.spectral_norm(m)
+
+    def forward(self, x):
+        x = self.project(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.ctx1(x)
+        x = self.layer3(x)
+        x = self.ctx2(x)
+        x = self.layer4(x)
+
+        x = self.final(x)
+        x = torch.tanh(x)
+
+        return x
+
+
+class Discriminator(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+        ch = 32
+
+        self.layer1 = ResBlockDown(3, 2 * ch)
+        self.layer2 = ResBlockDown(2 * ch, 4 * ch)
+        self.layer3 = ResBlockDown(4 * ch, 8 * ch)
+        self.layer4 = ResBlockDown(8 * ch, 8 * ch)
+
+        self.layer5 = ResBlockDown(8 * ch, 8 * ch, down=None)
+
+        self.ctx1 = GlobalContext3d(2 * ch)
+        self.ctx2 = GlobalContext3d(4 * ch)
+
+        self.final = nn.Sequential(
+            nn.ReLU(inplace=True),
+            AdaptiveSumPool3d(1),
+            Rearrange("n c () () () -> n c"),
+            nn.Linear(8 * ch, 1))
+
+        for m in self.modules():
+            if isinstance(m, (nn.Conv3d, nn.Linear)):
+                nn.utils.spectral_norm(m)
+
+    def forward(self, x):
+        x = self.layer1(x)
+        x = self.ctx1(x)
+        x = self.layer2(x)
+        x = self.ctx2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+        x = self.layer5(x)  # final block at constant resolution (down=None)
+
+        x = self.final(x)
+
+        return x
+
+
+class ResBlockUp(nn.Module):
+    def __init__(self, inplanes, outplanes, up=(2, 2, 2)):
+        super().__init__()
+
+        self.up = up
+
+        self.bn1 = nn.BatchNorm3d(inplanes)
+        self.bn2 = nn.BatchNorm3d(outplanes)
+
+        self.conv1 = nn.Conv3d(inplanes, outplanes, kernel_size=3, padding=1, bias=False)
+        self.conv2 = nn.Conv3d(outplanes, outplanes, kernel_size=3, padding=1, bias=False)
+        self.conv3 = nn.Conv3d(inplanes, outplanes, kernel_size=1, bias=False)
+
+    def forward(self, x):
+        xx = self.bn1(x)
+        xx = nn.functional.relu(xx, inplace=True)
+        xx = nn.functional.interpolate(xx, scale_factor=self.up, mode="nearest")
+        xx = self.conv1(xx)
+        xx = self.bn2(xx)
+        xx = nn.functional.relu(xx, inplace=True)
+        xx = self.conv2(xx)
+
+        x = nn.functional.interpolate(x, scale_factor=self.up, mode="nearest")
+        x = self.conv3(x)
+
+        return x + xx
+
+
+class ResBlockDown(nn.Module):
+    def __init__(self, inplanes, outplanes, down=(2, 2, 2)):
+        super().__init__()
+
+        self.down = down
+
+        self.conv1 = nn.Conv3d(inplanes, outplanes, kernel_size=3, padding=1, bias=False)
+        self.conv2 = nn.Conv3d(outplanes, outplanes, kernel_size=3, padding=1, bias=False)
+        self.conv3 = nn.Conv3d(inplanes, outplanes, kernel_size=1, bias=False)
+
+    def forward(self, x):
+        xx = nn.functional.relu(x, inplace=False)
+        xx = self.conv1(xx)
+        xx = nn.functional.relu(xx, inplace=True)
+        xx = self.conv2(xx)
+
+        if self.down is not None:
+            xx = nn.functional.avg_pool3d(xx, self.down)
+
+        x = self.conv3(x)
+
+        if self.down is not None:
+            x = nn.functional.avg_pool3d(x, self.down)
+
+        return x + xx
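+
+
+# Shape walkthrough, assuming the defaults above (ch=32, z-dimension 128):
+# Generator: (n, 128) -> project -> (n, 512, 2, 2, 2) -> four ResBlockUp
+# doublings -> (n, 32, 32, 32, 32) -> final conv + tanh -> (n, 3, 32, 32, 32),
+# i.e. 32 frames of 32x32 RGB, matching the vgan command's CenterCrop(32).
+# Discriminator: (n, 3, 32, 32, 32) -> four ResBlockDown halvings to
+# (n, 256, 2, 2, 2) -> layer5 at constant size -> sum pool -> (n, 1).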
diff --git a/ig65m/losses.py b/ig65m/losses.py
new file mode 100644
index 0000000..bad28d7
--- /dev/null
+++ b/ig65m/losses.py
@@ -0,0 +1,30 @@
+import torch.nn as nn
+
+
+# Hinge losses for unconditioned
+# GAN generator and discriminator
+# https://arxiv.org/abs/1705.02894
+
+
+class GeneratorHingeLoss(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, fake):
+        return (-1.) * fake.mean()
+
+
+class DiscriminatorHingeLoss(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, real, fake):
+        r = nn.functional.relu(1. - real).mean()
+        f = nn.functional.relu(1. + fake).mean()
+        return r, f
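+
+
+# Worked example of the margins: for a real score of 0.3 and a fake score of
+# -0.2, the discriminator pays relu(1 - 0.3) = 0.7 on the real clip and
+# relu(1 + (-0.2)) = 0.8 on the fake one; scores beyond the +/-1 margins cost
+# nothing. The generator simply pushes fake scores upward via -fake.mean().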
diff --git a/ig65m/pool.py b/ig65m/pool.py
new file mode 100644
index 0000000..e83272a
--- /dev/null
+++ b/ig65m/pool.py
@@ -0,0 +1,21 @@
+import torch.nn as nn
+
+from einops import reduce
+
+
+def adaptive_sum_pool3d(x, output_size):
+    if isinstance(output_size, int):
+        output_size = (output_size,) * 3
+
+    t, h, w = output_size
+
+    return reduce(x, "n c (t t2) (h h2) (w w2) -> n c t h w", "sum", t=t, h=h, w=w)
+
+
+class AdaptiveSumPool3d(nn.Module):
+    def __init__(self, output_size):
+        super().__init__()
+        self.output_size = output_size
+
+    def forward(self, x):
+        return adaptive_sum_pool3d(x, self.output_size)
diff --git a/ig65m/upsample.py b/ig65m/upsample.py
new file mode 100644
index 0000000..6d9d3fd
--- /dev/null
+++ b/ig65m/upsample.py
@@ -0,0 +1,62 @@
+import torch
+import torch.nn as nn
+
+from einops import rearrange
+
+
+# PixelShuffle and ICNR init paper
+# https://arxiv.org/abs/1707.02937
+
+
+class Upsample3d(nn.Module):
+    def __init__(self, planes, upscale_factor):
+        super().__init__()
+
+        self.explode = nn.Conv3d(planes, planes * (upscale_factor ** 3),
+                                 kernel_size=1, bias=False)
+
+        self.shuffle = PixelShuffle3d(upscale_factor)
+
+        icnr3d(self.explode.weight.data, upscale_factor)
+
+    def forward(self, x):
+        x = self.explode(x)
+        x = nn.functional.relu(x, inplace=True)
+        x = self.shuffle(x)
+        return x
+
+
+def icnr3d(x, upscale_factor):
+    n, c, t, h, w = x.size()
+    g = int(n / (upscale_factor ** 3))
+
+    z = torch.empty([g, c, t, h, w])
+    z = nn.init.kaiming_normal_(z)
+    z = rearrange(z, "g c t h w -> c g (t h w)")
+    z = z.repeat(1, 1, upscale_factor ** 3)
+    z = z.view([c, n, t, h, w])
+    z = rearrange(z, "c n t h w -> n c t h w")
+
+    x.copy_(z)
+
+
+def pixel_shuffle3d(x, upscale_factor):
+    return rearrange(x, "n (c t2 h2 w2) t h w -> n c (t t2) (h h2) (w w2)",
+                     h2=upscale_factor, w2=upscale_factor, t2=upscale_factor,
+                     c=x.size(1) // (upscale_factor ** 3))
+
+
+class PixelShuffle3d(nn.Module):
+    def __init__(self, upscale_factor):
+        super().__init__()
+
+        self.upscale_factor = upscale_factor
+
+    def forward(self, x):
+        return pixel_shuffle3d(x, self.upscale_factor)
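+
+
+# Shape sketch, assuming upscale_factor=2: explode maps (n, c, t, h, w) to
+# (n, 8c, t, h, w), and pixel_shuffle3d folds the eight channel groups back
+# into space as (n, c, 2t, 2h, 2w). ICNR starts all eight sub-positions of a
+# group from the same kernel, avoiding checkerboard artifacts at init time.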
diff --git a/requirements.in b/requirements.in
index 2c7cea5..5850288 100644
--- a/requirements.in
+++ b/requirements.in
@@ -3,3 +3,4 @@ opencv-contrib-python-headless
 einops
 pillow
 tqdm
+tensorboard
diff --git a/requirements.txt b/requirements.txt
index 2cf6d2d..9641a7c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,26 +4,88 @@
 #
 #    pip-compile --generate-hashes
 #
+absl-py==0.8.1 \
+    --hash=sha256:d9129186431e150d7fe455f1cb1ecbb92bb5dba9da9bc3ef7b012d98c4db2526 \
+    # via tensorboard
 einops==0.1.0 \
     --hash=sha256:4ab512fe059c0841e1a315449ca9d7f35eaa05c8c095a14f2c1b92b2b77684d2 \
     --hash=sha256:4fd64864fcb8159074da3213b9327c242536784416cbf423745ef8579850d30b
-numpy==1.17.2 \
-    --hash=sha256:05dbfe72684cc14b92568de1bc1f41e5f62b00f714afc9adee42f6311738091f \
-    --hash=sha256:0d82cb7271a577529d07bbb05cb58675f2deb09772175fab96dc8de025d8ac05 \
-    --hash=sha256:10132aa1fef99adc85a905d82e8497a580f83739837d7cbd234649f2e9b9dc58 \
-    --hash=sha256:12322df2e21f033a60c80319c25011194cd2a21294cc66fee0908aeae2c27832 \
-    --hash=sha256:16f19b3aa775dddc9814e02a46b8e6ae6a54ed8cf143962b4e53f0471dbd7b16 \
-    --hash=sha256:3d0b0989dd2d066db006158de7220802899a1e5c8cf622abe2d0bd158fd01c2c \
-    --hash=sha256:438a3f0e7b681642898fd7993d38e2bf140a2d1eafaf3e89bb626db7f50db355 \
-    --hash=sha256:5fd214f482ab53f2cea57414c5fb3e58895b17df6e6f5bca5be6a0bb6aea23bb \
-    --hash=sha256:73615d3edc84dd7c4aeb212fa3748fb83217e00d201875a47327f55363cef2df \
-    --hash=sha256:7bd355ad7496f4ce1d235e9814ec81ee3d28308d591c067ce92e49f745ba2c2f \
-    --hash=sha256:7d077f2976b8f3de08a0dcf5d72083f4af5411e8fddacd662aae27baa2601196 \
-    --hash=sha256:a4092682778dc48093e8bda8d26ee8360153e2047826f95a3f5eae09f0ae3abf \
-    --hash=sha256:b458de8624c9f6034af492372eb2fee41a8e605f03f4732f43fc099e227858b2 \
-    --hash=sha256:e70fc8ff03a961f13363c2c95ef8285e0cf6a720f8271836f852cc0fa64e97c8 \
-    --hash=sha256:ee8e9d7cad5fe6dde50ede0d2e978d81eafeaa6233fb0b8719f60214cf226578 \
-    --hash=sha256:f4a4f6aba148858a5a5d546a99280f71f5ee6ec8182a7d195af1a914195b21a2
+grpcio==1.24.3 \
+    --hash=sha256:01cb705eafba1108e2a947ba0457da4f6a1e8142c729fc61702b5fdd11009eb1 \
+    --hash=sha256:0b5a79e29f167d3cd06faad6b15babbc2661066daaacf79373c3a8e67ca1fca1 \
+    --hash=sha256:1097a61a0e97b3580642e6e1460a3a1f1ba1815e2a70d6057173bcc495417076 \
+    --hash=sha256:13970e665a4ec4cec7d067d7d3504a0398c657d91d26c581144ad9044e429c9a \
+    --hash=sha256:1557817cea6e0b87fad2a3e20da385170efb03a313db164e8078955add2dfa1b \
+    --hash=sha256:1b0fb036a2f9dd93d9a35c57c26420eeb4b571fcb14b51cddf5b1e73ea5d882b \
+    --hash=sha256:24d9e58d08e8cd545d8a3247a18654aff0e5e60414701696a8098fbb0d792b75 \
+    --hash=sha256:2c38b586163d2b91567fe5e6d9e7798f792012365adc838a64b66b22dce3f4d4 \
+    --hash=sha256:2df3ab4348507de60e1cbf75196403df1b9b4c4d4dc5bd11ac4eb63c46f691c7 \
+    --hash=sha256:32f70f7c90454ea568b868af2e96616743718d9233d23f62407e98caed81dfbf \
+    --hash=sha256:3af2a49d576820045c9c880ff29a5a96d020fe31b35d248519bfc6ccb8be4eac \
+    --hash=sha256:4ff7d63800a63db031ebac6a6f581ae84877c959401c24c28f2cc51fd36c47ad \
+    --hash=sha256:502aaa8be56f0ae69cda66bc27e1fb5531ceaa27ca515ec3c34f6178b1297180 \
+    --hash=sha256:55358ce3ec283222e435f7dbc6603521438458f3c65f7c1cb33b8dabf56d70d8 \
+    --hash=sha256:5583b01c67f85fa64a2c3fb085e5517c88b9c1500a2cce12d473cd99d0ed2e49 \
+    --hash=sha256:58d9a5557d3eb7b734a3cea8b16c891099a522b3953a45a30bd4c034f75fc913 \
+    --hash=sha256:5911f042c4ab177757eec5bcb4e2e9a2e823d888835d24577321bf55f02938fa \
+    --hash=sha256:5e16ea922f4e5017c04fd94e2639b1006e03097e9dd0cbb7a1c852af3ea8bf2e \
+    --hash=sha256:656e19d3f1b9050ee01b457f92838a9679d7cf84c995f708780f44484048705e \
+    --hash=sha256:6a1435449a82008c451c7e1a82a834387b9108f9a8d27910f86e7c482f5568e9 \
+    --hash=sha256:6ff02ca6cbed0ddb76e93ba0f8beb6a8c77d83a84eb7cafe2ae3399a8b9d69ea \
+    --hash=sha256:76de68f60102f333bf4817f38e81ecbee68b850f5a5da9f355235e948ac40981 \
+    --hash=sha256:7c6d7ddd50fc6548ea1dfe09c62509c4f95b8b40082287747be05aa8feb15ee2 \
+    --hash=sha256:836b9d29507de729129e363276fe7c7d6a34c7961e0f155787025552b15d22c0 \
+    --hash=sha256:869242b2baf8a888a4fe0548f86abc47cb4b48bdfd76ae62d6456e939c202e65 \
+    --hash=sha256:8954b24bd08641d906ee50b2d638efc76df893fbd0913149b80484fd0eac40c9 \
+    --hash=sha256:8cdea65d1abb2e698420db8daf20c8d272fbd9d96a51b26a713c1c76f237d181 \
+    --hash=sha256:90161840b4fe9636f91ed0d3ea1e7e615e488cbea4e77594c889e5f3d7a776db \
+    --hash=sha256:90fb6316b4d7d36700c40db4335902b78dcae13b5466673c21fd3b08a3c1b0c6 \
+    --hash=sha256:91b34f58db2611c9a93ecf751028f97fba1f06e65f49b38f272f6aa5d2977331 \
+    --hash=sha256:9474944a96a33eb8734fa8dc5805403d57973a3526204a5e1c1780d02e0572b6 \
+    --hash=sha256:9a36275db2a4774ac16c6822e7af816ee048071d5030b4c035fd53942b361935 \
+    --hash=sha256:9cbe26e2976b994c5f7c2d35a63354674d6ca0ce62f5b513f078bf63c1745229 \
+    --hash=sha256:9eaeabb3c0eecd6ddd0c16767fd12d130e2cebb8c2618f959a278b1ff336ddc3 \
+    --hash=sha256:a2bc7e10ebcf4be503ae427f9887e75c0cc24e88ce467a8e6eaca6bd2862406e \
+    --hash=sha256:a5b42e6292ba51b8e67e09fc256963ba4ca9c04026de004d2fe59cc17e3c3776 \
+    --hash=sha256:bd6ec1233c86c0b9bb5d03ec30dbe3ffbfa53335790320d99a7ae9018c5450f2 \
+    --hash=sha256:bef57530816af54d66b1f4c70a8f851f320cb6f84d4b5a0b422b0e9811ea4e59 \
+    --hash=sha256:c146a63eaadc6589b732780061f3c94cd0574388d372baccbb3c1597a9ebdb7a \
+    --hash=sha256:c2efd3b130dc639d615b6f58980e1bfd1b177ad821f30827afa5001aa30ddd48 \
+    --hash=sha256:c888b18f7392e6cc79a33a803e7ebd7890ac3318f571fca6b356526f35b53b12 \
+    --hash=sha256:ca30721fda297ae22f16bc37aa7ed244970ddfdcb98247570cdd26daaad4665e \
+    --hash=sha256:cf5f5340dd682ab034baa52f423a0f91326489c262ac9617fa06309ec05880e9 \
+    --hash=sha256:d0726aa0d9b57c56985db5952e90fb1033a317074f2877db5307cdd6eede1564 \
+    --hash=sha256:df442945b2dd6f8ae0e20b403e0fd4548cd5c2aad69200047cc3251257b78f65 \
+    --hash=sha256:e08e758c31919d167c0867539bd3b2441629ef00aa595e3ea2b635273659f40a \
+    --hash=sha256:e4864339deeeaefaad34dd3a432ee618a039fca28efb292949c855e00878203c \
+    --hash=sha256:f4cd049cb94d9f517b1cab5668a3b345968beba093bc79a637e671000b3540ec \
+    # via tensorboard
+markdown==3.1.1 \
+    --hash=sha256:2e50876bcdd74517e7b71f3e7a76102050edec255b3983403f1a63e7c8a41e7a \
+    --hash=sha256:56a46ac655704b91e5b7e6326ce43d5ef72411376588afa1dd90e881b83c7e8c \
+    # via tensorboard
+numpy==1.17.3 \
+    --hash=sha256:0b0dd8f47fb177d00fa6ef2d58783c4f41ad3126b139c91dd2f7c4b3fdf5e9a5 \
+    --hash=sha256:25ffe71f96878e1da7e014467e19e7db90ae7d4e12affbc73101bcf61785214e \
+    --hash=sha256:26efd7f7d755e6ca966a5c0ac5a930a87dbbaab1c51716ac26a38f42ecc9bc4b \
+    --hash=sha256:28b1180c758abf34a5c3fea76fcee66a87def1656724c42bb14a6f9717a5bdf7 \
+    --hash=sha256:2e418f0a59473dac424f888dd57e85f77502a593b207809211c76e5396ae4f5c \
+    --hash=sha256:30c84e3a62cfcb9e3066f25226e131451312a044f1fe2040e69ce792cb7de418 \
+    --hash=sha256:4650d94bb9c947151737ee022b934b7d9a845a7c76e476f3e460f09a0c8c6f39 \
+    --hash=sha256:4dd830a11e8724c9c9379feed1d1be43113f8bcce55f47ea7186d3946769ce26 \
+    --hash=sha256:4f2a2b279efde194877aff1f76cf61c68e840db242a5c7169f1ff0fd59a2b1e2 \
+    --hash=sha256:62d22566b3e3428dfc9ec972014c38ed9a4db4f8969c78f5414012ccd80a149e \
+    --hash=sha256:669795516d62f38845c7033679c648903200980d68935baaa17ac5c7ae03ae0c \
+    --hash=sha256:75fcd60d682db3e1f8fbe2b8b0c6761937ad56d01c1dc73edf4ef2748d5b6bc4 \
+    --hash=sha256:9395b0a41e8b7e9a284e3be7060db9d14ad80273841c952c83a5afc241d2bd98 \
+    --hash=sha256:9e37c35fc4e9410093b04a77d11a34c64bf658565e30df7cbe882056088a91c1 \
+    --hash=sha256:a0678793096205a4d784bd99f32803ba8100f639cf3b932dc63b21621390ea7e \
+    --hash=sha256:b46554ad4dafb2927f88de5a1d207398c5385edbb5c84d30b3ef187c4a3894d8 \
+    --hash=sha256:c867eeccd934920a800f65c6068acdd6b87e80d45cd8c8beefff783b23cdc462 \
+    --hash=sha256:dd0667f5be56fb1b570154c2c0516a528e02d50da121bbbb2cbb0b6f87f59bc2 \
+    --hash=sha256:de2b1c20494bdf47f0160bd88ed05f5e48ae5dc336b8de7cfade71abcc95c0b9 \
+    --hash=sha256:f1df7b2b7740dd777571c732f98adb5aad5450aee32772f1b39249c8a50386f6 \
+    --hash=sha256:ffca69e29079f7880c5392bf675eb8b4146479d976ae1924d01cd92b04cccbcc
 opencv-contrib-python-headless==4.1.1.26 \
     --hash=sha256:083c1d0dce23b86c627ad8c7eddc93b19431431ea7413be78673950e8a67966d \
     --hash=sha256:08db29152b2a124445e233ec90786a93150e565cdc83f37208e6ccdee87493a4 \
@@ -52,33 +114,70 @@ opencv-contrib-python-headless==4.1.1.26 \
     --hash=sha256:eca35aca76e7e1debd051083399bbf8319dfdb47ca13df56b0d8acb5c2215a22 \
     --hash=sha256:f9d57c94410e91af940f331aa9351065ba9d470d05646b8fd289da1170051bd2 \
     --hash=sha256:ffec278ef8c6a0341b656dd967c2109c861e39106b0067583756575c54c4caf2
-pillow==6.2.0 \
-    --hash=sha256:00fdeb23820f30e43bba78eb9abb00b7a937a655de7760b2e09101d63708b64e \
-    --hash=sha256:01f948e8220c85eae1aa1a7f8edddcec193918f933fb07aaebe0bfbbcffefbf1 \
-    --hash=sha256:08abf39948d4b5017a137be58f1a52b7101700431f0777bec3d897c3949f74e6 \
-    --hash=sha256:099a61618b145ecb50c6f279666bbc398e189b8bc97544ae32b8fcb49ad6b830 \
-    --hash=sha256:2c1c61546e73de62747e65807d2cc4980c395d4c5600ecb1f47a650c6fa78c79 \
-    --hash=sha256:2ed9c4f694861642401f27dc3cb99772be67cd190e84845c749dae0a06c3bfae \
-    --hash=sha256:338581b30b908e111be578f0297255f6b57a51358cd16fa0e6f664c9a1f88bff \
-    --hash=sha256:38c7d48a21cd06fdeee93987147b9b1c55b73b4cfcbf83240568bfbd5adee447 \
-    --hash=sha256:43fd026f613c8e48a25eba1a92f4d2ad7f3903c95d8c33a11611a7717d2ab654 \
-    --hash=sha256:4548236844327a718ce3bb182ab32a16fa2050c61e334e959f554cac052fb0df \
-    --hash=sha256:5090857876c58885cfa388dc649e5db30aae98a068c26f3fd0ac9d7d9a4d9572 \
-    --hash=sha256:5bbba34f97a26a93f5e8dec469ca4ddd712451418add43da946dbaed7f7a98d2 \
-    --hash=sha256:65a28969a025a0eb4594637b6103201dc4ed2a9508bdab56ac33e43e3081c404 \
-    --hash=sha256:892bb52b70bd5ea9dbbc3ac44f38e84f5a04e9d8b1bff48159d96cb795b81159 \
-    --hash=sha256:8a9becd5cbd5062f973bcd2e7bc79483af310222de112b6541f8af1f93a3cc42 \
-    --hash=sha256:972a7aaeb7c4a2795b52eef52ee991ef040b31009f36deca6207a986607b55f3 \
-    --hash=sha256:97b119c436bfa96a92ac2ca525f7025836d4d4e64b1c9f9eff8dbaf3ff1d86f3 \
-    --hash=sha256:9ba37698e242223f8053cc158f130aee046a96feacbeab65893dbe94f5530118 \
-    --hash=sha256:b1b0e1f626a0f079c0d3696db70132fb1f29aa87c66aecb6501a9b8be64ce9f7 \
-    --hash=sha256:c14c1224fd1a5be2733530d648a316974dbbb3c946913562c6005a76f21ca042 \
-    --hash=sha256:c79a8546c48ae6465189e54e3245a97ddf21161e33ff7eaa42787353417bb2b6 \
-    --hash=sha256:ceb76935ac4ebdf6d7bc845482a4450b284c6ccfb281e34da51d510658ab34d8 \
-    --hash=sha256:e22bffaad04b4d16e1c091baed7f2733fc1ebb91e0c602abf1b6834d17158b1f \
-    --hash=sha256:ec883b8e44d877bda6f94a36313a1c6063f8b1997aa091628ae2f34c7f97c8d5 \
-    --hash=sha256:f1baa54d50ec031d1a9beb89974108f8f2c0706f49798f4777df879df0e1adb6 \
-    --hash=sha256:f53a5385932cda1e2c862d89460992911a89768c65d176ff8c50cddca4d29bed
+pillow==6.2.1 \
+    --hash=sha256:047d9473cf68af50ac85f8ee5d5f21a60f849bc17d348da7fc85711287a75031 \
+    --hash=sha256:0f66dc6c8a3cc319561a633b6aa82c44107f12594643efa37210d8c924fc1c71 \
+    --hash=sha256:12c9169c4e8fe0a7329e8658c7e488001f6b4c8e88740e76292c2b857af2e94c \
+    --hash=sha256:248cffc168896982f125f5c13e9317c059f74fffdb4152893339f3be62a01340 \
+    --hash=sha256:27faf0552bf8c260a5cee21a76e031acaea68babb64daf7e8f2e2540745082aa \
+    --hash=sha256:285edafad9bc60d96978ed24d77cdc0b91dace88e5da8c548ba5937c425bca8b \
+    --hash=sha256:384b12c9aa8ef95558abdcb50aada56d74bc7cc131dd62d28c2d0e4d3aadd573 \
+    --hash=sha256:38950b3a707f6cef09cd3cbb142474357ad1a985ceb44d921bdf7b4647b3e13e \
+    --hash=sha256:4aad1b88933fd6dc2846552b89ad0c74ddbba2f0884e2c162aa368374bf5abab \
+    --hash=sha256:4ac6148008c169603070c092e81f88738f1a0c511e07bd2bb0f9ef542d375da9 \
+    --hash=sha256:4deb1d2a45861ae6f0b12ea0a786a03d19d29edcc7e05775b85ec2877cb54c5e \
+    --hash=sha256:59aa2c124df72cc75ed72c8d6005c442d4685691a30c55321e00ed915ad1a291 \
+    --hash=sha256:5a47d2123a9ec86660fe0e8d0ebf0aa6bc6a17edc63f338b73ea20ba11713f12 \
+    --hash=sha256:5cc901c2ab9409b4b7ac7b5bcc3e86ac14548627062463da0af3b6b7c555a871 \
+    --hash=sha256:6c1db03e8dff7b9f955a0fb9907eb9ca5da75b5ce056c0c93d33100a35050281 \
+    --hash=sha256:7ce80c0a65a6ea90ef9c1f63c8593fcd2929448613fc8da0adf3e6bfad669d08 \
+    --hash=sha256:809c19241c14433c5d6135e1b6c72da4e3b56d5c865ad5736ab99af8896b8f41 \
+    --hash=sha256:83792cb4e0b5af480588601467c0764242b9a483caea71ef12d22a0d0d6bdce2 \
+    --hash=sha256:846fa202bd7ee0f6215c897a1d33238ef071b50766339186687bd9b7a6d26ac5 \
+    --hash=sha256:9f5529fc02009f96ba95bea48870173426879dc19eec49ca8e08cd63ecd82ddb \
+    --hash=sha256:a423c2ea001c6265ed28700df056f75e26215fd28c001e93ef4380b0f05f9547 \
+    --hash=sha256:ac4428094b42907aba5879c7c000d01c8278d451a3b7cccd2103e21f6397ea75 \
+    --hash=sha256:b1ae48d87f10d1384e5beecd169c77502fcc04a2c00a4c02b85f0a94b419e5f9 \
+    --hash=sha256:bf4e972a88f8841d8fdc6db1a75e0f8d763e66e3754b03006cbc3854d89f1cb1 \
+    --hash=sha256:c6414f6aad598364aaf81068cabb077894eb88fed99c6a65e6e8217bab62ae7a \
+    --hash=sha256:c710fcb7ee32f67baf25aa9ffede4795fd5d93b163ce95fdc724383e38c9df96 \
+    --hash=sha256:c7be4b8a09852291c3c48d3c25d1b876d2494a0a674980089ac9d5e0d78bd132 \
+    --hash=sha256:c9e5ffb910b14f090ac9c38599063e354887a5f6d7e6d26795e916b4514f2c1a \
+    --hash=sha256:e0697b826da6c2472bb6488db4c0a7fa8af0d52fa08833ceb3681358914b14e5 \
+    --hash=sha256:e9a3edd5f714229d41057d56ac0f39ad9bdba6767e8c888c951869f0bdd129b0
+protobuf==3.10.0 \
+    --hash=sha256:125713564d8cfed7610e52444c9769b8dcb0b55e25cc7841f2290ee7bc86636f \
+    --hash=sha256:1accdb7a47e51503be64d9a57543964ba674edac103215576399d2d0e34eac77 \
+    --hash=sha256:27003d12d4f68e3cbea9eb67427cab3bfddd47ff90670cb367fcd7a3a89b9657 \
+    --hash=sha256:3264f3c431a631b0b31e9db2ae8c927b79fc1a7b1b06b31e8e5bcf2af91fe896 \
+    --hash=sha256:3c5ab0f5c71ca5af27143e60613729e3488bb45f6d3f143dc918a20af8bab0bf \
+    --hash=sha256:45dcf8758873e3f69feab075e5f3177270739f146255225474ee0b90429adef6 \
+    --hash=sha256:56a77d61a91186cc5676d8e11b36a5feb513873e4ae88d2ee5cf530d52bbcd3b \
+    --hash=sha256:5984e4947bbcef5bd849d6244aec507d31786f2dd3344139adc1489fb403b300 \
+    --hash=sha256:6b0441da73796dd00821763bb4119674eaf252776beb50ae3883bed179a60b2a \
+    --hash=sha256:6f6677c5ade94d4fe75a912926d6796d5c71a2a90c2aeefe0d6f211d75c74789 \
+    --hash=sha256:84a825a9418d7196e2acc48f8746cf1ee75877ed2f30433ab92a133f3eaf8fbe \
+    --hash=sha256:b842c34fe043ccf78b4a6cf1019d7b80113707d68c88842d061fa2b8fb6ddedc \
+    --hash=sha256:ca33d2f09dae149a1dcf942d2d825ebb06343b77b437198c9e2ef115cf5d5bc1 \
+    --hash=sha256:db83b5c12c0cd30150bb568e6feb2435c49ce4e68fe2d7b903113f0e221e58fe \
+    --hash=sha256:f50f3b1c5c1c1334ca7ce9cad5992f098f460ffd6388a3cabad10b66c2006b09 \
+    --hash=sha256:f99f127909731cafb841c52f9216e447d3e4afb99b17bebfad327a75aee206de \
+    # via tensorboard
+six==1.12.0 \
+    --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
+    --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
+    # via absl-py, grpcio, protobuf, tensorboard
+tensorboard==2.0.0 \
+    --hash=sha256:143e8c8226e812bed1ad26e9139e7aeda70ea4984aab40ade52a02454bec84e4 \
+    --hash=sha256:d3559616ccad8d72e7a259bff51be61a9bf1e66e2cebdc782c33e4b588b5e943
 tqdm==4.36.1 \
     --hash=sha256:abc25d0ce2397d070ef07d8c7e706aede7920da163c64997585d42d3537ece3d \
     --hash=sha256:dd3fcca8488bb1d416aa7469d2f277902f26260c45aa86b667b074cd44b3b115
+werkzeug==0.16.0 \
+    --hash=sha256:7280924747b5733b246fe23972186c6b348f9ae29724135a6dfc1e53cea433e7 \
+    --hash=sha256:e5f4a1f98b52b18a93da705a7458e55afb26f32bff83ff5d19189f92462d65c4 \
+    # via tensorboard
+wheel==0.33.6 \
+    --hash=sha256:10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646 \
+    --hash=sha256:f4da1763d3becf2e2cd92a14a7c920f0f00eca30fdde9ea992c836685b9faf28 \
+    # via tensorboard