
Commit dd06bf3

package code for pip install
1 parent d4b2afe commit dd06bf3


45 files changed: 139 additions & 106 deletions

README.md

Lines changed: 1 addition & 1 deletion
@@ -151,7 +151,7 @@ w = G.mapping(z, c, truncation_psi=0.5, truncation_cutoff=8)
 img = G.synthesis(w, noise_mode='const', force_fp32=True)
 ```

-Please refer to [`generate.py`](./generate.py), [`style_mixing.py`](./style_mixing.py), and [`projector.py`](./projector.py) for further examples.
+Please refer to [`generate.py`](stylegan2_ada_pytorch/generate.py), [`style_mixing.py`](stylegan2_ada_pytorch/style_mixing.py), and [`projector.py`](stylegan2_ada_pytorch/projector.py) for further examples.

 ## Preparing datasets

pyproject.toml

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+    "setuptools>=42",
+    "wheel"
+]
+build-backend = "setuptools.build_meta"

setup.cfg

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
+[metadata]
+name = stylegan2-ada-pytorch
+version = 1.0.0
+description = StyleGAN2-ADA - Official PyTorch implementation
+long_description = file: README.md
+long_description_content_type = text/markdown
+url = https://github.com/NVlabs/stylegan2-ada-pytorch
+project_urls =
+    Bug Tracker = https://github.com/NVlabs/stylegan2-ada-pytorch/issues
+classifiers =
+    Programming Language :: Python :: 3
+    License :: OSI Approved :: MIT License
+    Operating System :: OS Independent
+
+[options]
+package_dir =
+    = .
+packages = find:
+python_requires = >=3.6
+install_requires =
+    torch >=1.7.0
+    click
+    requests
+    tqdm
+    pyspng
+    ninja
+    imageio-ffmpeg ==0.4.3
+
+[options.packages.find]
+where = .
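
Together with the pyproject.toml above, this setup.cfg makes the repository installable as a regular package (for example, `pip install .` from the checkout root) and exposes its modules under the stylegan2_ada_pytorch prefix used by the import changes in the rest of this commit. A minimal sketch of the intended post-install usage; the checkpoint URL is a placeholder and a CUDA device is assumed:

```python
import torch

# Package-qualified imports, matching the new import style in generate.py below.
from stylegan2_ada_pytorch import dnnlib, legacy

network_pkl = 'https://example.com/pretrained.pkl'  # placeholder checkpoint URL
device = torch.device('cuda')

# Load the exponential-moving-average generator from the pickle.
with dnnlib.util.open_url(network_pkl) as f:
    G = legacy.load_network_pkl(f)['G_ema'].to(device)

z = torch.randn([1, G.z_dim], device=device)           # latent codes
c = None                                                # class labels (None for unconditional models)
img = G(z, c, truncation_psi=0.5, noise_mode='const')  # NCHW, float32, dynamic range [-1, 1]
```

The install_requires list mirrors the environment the original scripts expect (Python >=3.6, torch >=1.7.0), with imageio-ffmpeg pinned to 0.4.3.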

stylegan2_ada_pytorch/__init__.py

Whitespace-only changes.

calc_metrics.py renamed to stylegan2_ada_pytorch/calc_metrics.py

Lines changed: 10 additions & 12 deletions
@@ -14,19 +14,17 @@
 import tempfile
 import copy
 import torch
-import dnnlib

-import legacy
-from metrics import metric_main
-from metrics import metric_utils
-from torch_utils import training_stats
-from torch_utils import custom_ops
-from torch_utils import misc
+from stylegan2_ada_pytorch import legacy, dnnlib
+from stylegan2_ada_pytorch.metrics import metric_main, metric_utils
+from stylegan2_ada_pytorch.torch_utils import training_stats
+from stylegan2_ada_pytorch.torch_utils import custom_ops, misc
+

 #----------------------------------------------------------------------------

 def subprocess_fn(rank, args, temp_dir):
-    dnnlib.util.Logger(should_flush=True)
+    stylegan2_ada_pytorch.dnnlib.util.Logger(should_flush=True)

     # Init torch.distributed.
     if args.num_gpus > 1:
@@ -61,7 +59,7 @@ def subprocess_fn(rank, args, temp_dir):
             print(f'Calculating {metric}...')
         progress = metric_utils.ProgressMonitor(verbose=args.verbose)
         result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
-            num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
+            num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
         if rank == 0:
             metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
         if rank == 0 and args.verbose:
@@ -128,7 +126,7 @@ def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
       ppl_zend     Perceptual path length in Z at path endpoints against cropped image.
       ppl_wend     Perceptual path length in W at path endpoints against cropped image.
     """
-    dnnlib.util.Logger(should_flush=True)
+    stylegan2_ada_pytorch.dnnlib.util.Logger(should_flush=True)

     # Validate arguments.
     args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
@@ -138,11 +136,11 @@ def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
         ctx.fail('--gpus must be at least 1')

     # Load network.
-    if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
+    if not stylegan2_ada_pytorch.dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
         ctx.fail('--network must point to a file or URL')
     if args.verbose:
         print(f'Loading network from "{network_pkl}"...')
-    with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
+    with stylegan2_ada_pytorch.dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
         network_dict = legacy.load_network_pkl(f)
     args.G = network_dict['G_ema'] # subclass of torch.nn.Module

File renamed without changes.
File renamed without changes.
File renamed without changes.

generate.py renamed to stylegan2_ada_pytorch/generate.py

Lines changed: 3 additions & 3 deletions
@@ -13,12 +13,12 @@
 from typing import List, Optional

 import click
-import dnnlib
 import numpy as np
 import PIL.Image
 import torch

-import legacy
+from stylegan2_ada_pytorch import legacy, dnnlib
+

 #----------------------------------------------------------------------------

@@ -80,7 +80,7 @@ def generate_images(

     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with dnnlib.util.open_url(network_pkl) as f:
+    with stylegan2_ada_pytorch.dnnlib.util.open_url(network_pkl) as f:
         G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore

     os.makedirs(outdir, exist_ok=True)

legacy.py renamed to stylegan2_ada_pytorch/legacy.py

Lines changed: 6 additions & 5 deletions
@@ -12,8 +12,9 @@
 import copy
 import numpy as np
 import torch
-import dnnlib
-from torch_utils import misc
+from stylegan2_ada_pytorch import dnnlib
+from stylegan2_ada_pytorch.torch_utils import misc
+

 #----------------------------------------------------------------------------

@@ -165,7 +166,7 @@ def kwarg(tf_name, default=None, none=None):
     #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

     # Convert params.
-    from training import networks
+    from stylegan2_ada_pytorch.training import networks
     G = networks.Generator(**kwargs).eval().requires_grad_(False)
     # pylint: disable=unnecessary-lambda
     _populate_module_params(G,
@@ -262,7 +263,7 @@ def kwarg(tf_name, default=None):
     #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

     # Convert params.
-    from training import networks
+    from stylegan2_ada_pytorch.training import networks
     D = networks.Discriminator(**kwargs).eval().requires_grad_(False)
     # pylint: disable=unnecessary-lambda
     _populate_module_params(D,
@@ -305,7 +306,7 @@ def convert_network_pickle(source, dest, force_fp16):
         --dest=stylegan2-cat-config-f.pkl
     """
     print(f'Loading "{source}"...')
-    with dnnlib.util.open_url(source) as f:
+    with stylegan2_ada_pytorch.dnnlib.util.open_url(source) as f:
         data = load_network_pkl(f, force_fp16=force_fp16)
     print(f'Saving "{dest}"...')
     with open(dest, 'wb') as f:
File renamed without changes.

metrics/metric_main.py renamed to stylegan2_ada_pytorch/metrics/metric_main.py

Lines changed: 2 additions & 2 deletions
@@ -10,7 +10,7 @@
 import time
 import json
 import torch
-import dnnlib
+from .. import dnnlib

 from . import metric_utils
 from . import frechet_inception_distance
@@ -58,7 +58,7 @@ def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full
         results = dnnlib.EasyDict(results),
         metric = metric,
         total_time = total_time,
-        total_time_str = dnnlib.util.format_time(total_time),
+        total_time_str = stylegan2_ada_pytorch.dnnlib.util.format_time(total_time),
         num_gpus = opts.num_gpus,
     )
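
The metrics and torch_utils submodules now reach dnnlib through relative imports (`from .. import dnnlib` above), while the top-level scripts import it through the absolute package path. A small sanity-check sketch of the layout implied by these renames, assuming the package and the dependencies pulled in by the metrics modules are installed:

```python
from stylegan2_ada_pytorch import dnnlib
from stylegan2_ada_pytorch.metrics import metric_main

# The relative import inside the subpackage binds the same module object
# as the absolute package-qualified import.
assert metric_main.dnnlib is dnnlib

# dnnlib.EasyDict is the attribute-access dict used throughout these diffs.
opts = dnnlib.EasyDict(num_gpus=1, rank=0)
print(opts.num_gpus, opts.rank)
```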

metrics/metric_utils.py renamed to stylegan2_ada_pytorch/metrics/metric_utils.py

Lines changed: 6 additions & 5 deletions
@@ -14,7 +14,8 @@
 import uuid
 import numpy as np
 import torch
-import dnnlib
+from stylegan2_ada_pytorch import dnnlib
+

 #----------------------------------------------------------------------------

@@ -44,7 +45,7 @@ def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, ve
         is_leader = (rank == 0)
         if not is_leader and num_gpus > 1:
             torch.distributed.barrier() # leader goes first
-        with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
+        with stylegan2_ada_pytorch.dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
             _feature_detector_cache[key] = torch.jit.load(f).eval().to(device)
         if is_leader and num_gpus > 1:
             torch.distributed.barrier() # others follow
@@ -156,7 +157,7 @@ def update(self, cur_items):
         total_time = cur_time - self.start_time
         time_per_item = (cur_time - self.batch_time) / max(cur_items - self.batch_items, 1)
         if (self.verbose) and (self.tag is not None):
-            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item*1e3:.2f}')
+            print(f'{self.tag:<19s} items {cur_items:<7d} time {stylegan2_ada_pytorch.dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item * 1e3:.2f}')
         self.batch_time = cur_time
         self.batch_items = cur_items

@@ -178,7 +179,7 @@ def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1)
 #----------------------------------------------------------------------------

 def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, **stats_kwargs):
-    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
     if data_loader_kwargs is None:
         data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)

@@ -236,7 +237,7 @@ def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel

     # Setup generator and load labels.
     G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
-    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)

     # Image generation func.
     def run_generator(z, c):

metrics/perceptual_path_length.py renamed to stylegan2_ada_pytorch/metrics/perceptual_path_length.py

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@
 import copy
 import numpy as np
 import torch
-import dnnlib
+from .. import dnnlib
 from . import metric_utils

 #----------------------------------------------------------------------------
@@ -93,7 +93,7 @@ def forward(self, c):
 #----------------------------------------------------------------------------

 def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size, jit=False):
-    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
     vgg16_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
     vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose)

projector.py renamed to stylegan2_ada_pytorch/projector.py

Lines changed: 4 additions & 4 deletions
@@ -19,8 +19,8 @@
 import torch
 import torch.nn.functional as F

-import dnnlib
-import legacy
+from stylegan2_ada_pytorch import legacy, dnnlib
+

 def project(
     G,
@@ -58,7 +58,7 @@ def logprint(*args):

     # Load VGG16 feature detector.
     url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
-    with dnnlib.util.open_url(url) as f:
+    with stylegan2_ada_pytorch.dnnlib.util.open_url(url) as f:
         vgg16 = torch.jit.load(f).eval().to(device)

     # Features for target image.
@@ -161,7 +161,7 @@ def run_projection(
     # Load networks.
     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with dnnlib.util.open_url(network_pkl) as fp:
+    with stylegan2_ada_pytorch.dnnlib.util.open_url(network_pkl) as fp:
         G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore

     # Load target image.

style_mixing.py renamed to stylegan2_ada_pytorch/style_mixing.py

Lines changed: 3 additions & 3 deletions
@@ -13,12 +13,12 @@
 from typing import List

 import click
-import dnnlib
 import numpy as np
 import PIL.Image
 import torch

-import legacy
+from stylegan2_ada_pytorch import legacy, dnnlib
+

 #----------------------------------------------------------------------------

@@ -61,7 +61,7 @@ def generate_style_mix(
     """
     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with dnnlib.util.open_url(network_pkl) as f:
+    with stylegan2_ada_pytorch.dnnlib.util.open_url(network_pkl) as f:
         G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore

     os.makedirs(outdir, exist_ok=True)

torch_utils/misc.py renamed to stylegan2_ada_pytorch/torch_utils/misc.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 import numpy as np
 import torch
 import warnings
-import dnnlib
+from stylegan2_ada_pytorch import dnnlib

 #----------------------------------------------------------------------------
 # Cached construction of constant tensors. Avoids CPU=>GPU copy when the

torch_utils/ops/bias_act.py renamed to stylegan2_ada_pytorch/torch_utils/ops/bias_act.py

Lines changed: 10 additions & 10 deletions
@@ -12,7 +12,7 @@
 import warnings
 import numpy as np
 import torch
-import dnnlib
+from ... import dnnlib
 import traceback

 from .. import custom_ops
@@ -21,15 +21,15 @@
 #----------------------------------------------------------------------------

 activation_funcs = {
-    'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
-    'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
-    'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
-    'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
-    'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
-    'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
-    'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
-    'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
-    'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
+    'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
+    'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
+    'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
+    'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
+    'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
+    'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
+    'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
+    'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
+    'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
 }

 #----------------------------------------------------------------------------

torch_utils/ops/conv2d_resample.py renamed to stylegan2_ada_pytorch/torch_utils/ops/conv2d_resample.py

Lines changed: 5 additions & 5 deletions
@@ -105,19 +105,19 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight

     # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
     if kw == 1 and kh == 1 and (down > 1 and up == 1):
-        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
+        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
         x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
         return x

     # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
     if kw == 1 and kh == 1 and (up > 1 and down == 1):
         x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
-        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
+        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0, px1, py0, py1], gain=up ** 2, flip_filter=flip_filter)
         return x

     # Fast path: downsampling only => use strided convolution.
     if down > 1 and up == 1:
-        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
+        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
         x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
         return x

@@ -136,7 +136,7 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight
         pxt = max(min(-px0, -px1), 0)
         pyt = max(min(-py0, -py1), 0)
         x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
-        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
+        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0 + pxt, px1 + pxt, py0 + pyt, py1 + pyt], gain=up ** 2, flip_filter=flip_filter)
         if down > 1:
             x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
         return x
@@ -147,7 +147,7 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight
             return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)

     # Fallback: Generic reference implementation.
-    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
+    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0, px1, py0, py1], gain=up ** 2, flip_filter=flip_filter)
     x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
     if down > 1:
         x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
File renamed without changes.

torch_utils/persistence.py renamed to stylegan2_ada_pytorch/torch_utils/persistence.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
 import copy
 import uuid
 import types
-import dnnlib
+from stylegan2_ada_pytorch import dnnlib

 #----------------------------------------------------------------------------
