
Commit 22d78c7

Merge pull request #3 from ercaronte/feature/tf2compatibility
Upgrade lucid to use TensorFlow v2 APIs and make it work on the Colab platform
2 parents 9bdef1c + b61ef33 commit 22d78c7

File tree

67 files changed: +52124 lines, -50764 lines


lucid/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -35,4 +35,4 @@
seed = 0

# Set the lucid version - setup.py imports this value!
- __version__ = "0.3.10"
+ __version__ = "0.3.11"

lucid/misc/gl/glcontext.py

Lines changed: 2 additions & 2 deletions
@@ -37,7 +37,7 @@
'prior importing this module.')
raise

- import ctypes
+ import ctypes.util
from ctypes import pointer
import os

@@ -117,4 +117,4 @@ def create_opengl_context(surface_size=(640, 480)):

egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT,
                                   None)
- egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
+ egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)

lucid/misc/gradient_override.py

Lines changed: 5 additions & 4 deletions
@@ -100,7 +100,7 @@ def _foo_grad_alt(op, grad): ...
override_dict_by_name[op_name] = grad_f
else:
override_dict_by_name[op_name] = register_to_random_name(grad_f)
- with tf.get_default_graph().gradient_override_map(override_dict_by_name):
+ with tf.compat.v1.get_default_graph().gradient_override_map(override_dict_by_name):
yield


@@ -154,7 +154,8 @@ def store_out(out_value):
state["out_value"] = out_value

store_name = "store_" + f.__name__
- store = tf.py_func(store_out, [out], (), stateful=True, name=store_name)
+ store = tf.compat.v1.py_func(store_out, [out], (), stateful=True, name=store_name)
+ # store = tf.numpy_function(store_out, [out], (), stateful=True, name=store_name) # not yet implemented in TF 2.5

# Next, we create the mock function, with an overriden gradient.
# Note that we need to make sure store gets evaluated before the mock

@@ -167,8 +168,8 @@ def mock_f(*inputs):
with tf.control_dependencies([store]):
with gradient_override_map({"PyFunc": grad_f_name}):
mock_name = "mock_" + f.__name__
- mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True,
-                       name=mock_name)
+ mock_out = tf.compat.v1.py_func(mock_f, inputs, out.dtype, stateful=True, name=mock_name)
+ # mock_out = tf.numpy_function(mock_f, inputs, out.dtype, stateful=True, name=mock_name) # not yet implemented in TF 2.5
mock_out.set_shape(out.get_shape())

# Finally, we can return the mock.
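The py_func wrappers above only work while a graph is being built. As a minimal sketch of the underlying mechanism (not taken from this commit; the gradient name "ZeroGradDemo", the Relu op type, and the placeholder are illustrative), this is how a custom gradient can be registered and swapped in via gradient_override_map under the compat.v1 APIs:

    import tensorflow as tf
    tf.compat.v1.disable_eager_execution()  # assumption: the caller opts into TF1-style graph mode

    @tf.RegisterGradient("ZeroGradDemo")    # hypothetical gradient name
    def _zero_grad_demo(op, grad):
        return tf.zeros_like(op.inputs[0])  # replace the incoming gradient with zeros

    x = tf.compat.v1.placeholder(tf.float32, [None])
    graph = tf.compat.v1.get_default_graph()
    with graph.gradient_override_map({"Relu": "ZeroGradDemo"}):
        y = tf.nn.relu(x)
    dy_dx = tf.gradients(y, x)              # backprop now uses the overridden gradient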

lucid/misc/io/loading.py

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ def _load_text(handle, split=False, encoding="utf-8"):
def _load_graphdef_protobuf(handle, **kwargs):
"""Load GraphDef from a binary proto file."""
# as_graph_def
- graph_def = tf.GraphDef.FromString(handle.read())
+ graph_def = tf.compat.v1.GraphDef.FromString(handle.read())

# check if this is a lucid-saved model
# metadata = modelzoo.util.extract_metadata(graph_def)

lucid/misc/io/reading.py

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@
import logging
from urllib.parse import urlparse
from urllib import request
- from tensorflow.io.gfile import GFile
+ from tensorflow.compat.v1.io.gfile import GFile
import tensorflow as tf
from tempfile import gettempdir
import gc

lucid/misc/io/showing.py

Lines changed: 1 addition & 1 deletion
@@ -329,7 +329,7 @@ def _strip_consts(graph_def, max_const_size=32):
This is mostly a utility function for graph(), and also originates here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
"""
- strip_def = tf.GraphDef()
+ strip_def = tf.compat.v1.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)

lucid/misc/io/writing.py

Lines changed: 3 additions & 3 deletions
@@ -25,7 +25,7 @@
import logging
from contextlib import contextmanager
from urllib.parse import urlparse
- from tensorflow import gfile
+ from tensorflow.compat.v1.io import gfile

from lucid.misc.io.scoping import scope_url

@@ -66,14 +66,14 @@ def write_handle(path, mode=None):
path = scope_url(path)

if _supports_make_dirs(path):
- gfile.MakeDirs(os.path.dirname(path))
+ gfile.makedirs(os.path.dirname(path))

if mode is None:
if _supports_binary_writing(path):
mode = "wb"
else:
mode = "w"

- handle = gfile.Open(path, mode)
+ handle = gfile.GFile(path, mode)
yield handle
handle.close()
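The lowercase names (makedirs, GFile) also match TF2's tf.io.gfile module. A small usage sketch (the paths are illustrative, not from the repo):

    import tensorflow as tf

    tf.io.gfile.makedirs("/tmp/lucid_demo")            # works for local and gs:// paths alike
    with tf.io.gfile.GFile("/tmp/lucid_demo/notes.txt", "w") as handle:
        handle.write("written through tf.io.gfile")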

lucid/misc/redirected_relu_grad.py

Lines changed: 10 additions & 10 deletions
@@ -92,12 +92,12 @@ def redirected_relu_grad(op, grad):
x = op.inputs[0]

# Compute ReLu gradient
- relu_grad = tf.where(x < 0., tf.zeros_like(grad), grad)
+ relu_grad = tf.compat.v1.where(x < 0., tf.zeros_like(grad), grad)

# Compute redirected gradient: where do we need to zero out incoming gradient
# to prevent input going lower if its already negative
neg_pushing_lower = tf.logical_and(x < 0., grad > 0.)
- redirected_grad = tf.where(neg_pushing_lower, tf.zeros_like(grad), grad)
+ redirected_grad = tf.compat.v1.where(neg_pushing_lower, tf.zeros_like(grad), grad)

# Ensure we have at least a rank 2 tensor, as we expect a batch dimension
assert_op = tf.Assert(tf.greater(tf.rank(relu_grad), 1), [tf.rank(relu_grad)])

@@ -106,12 +106,12 @@ def redirected_relu_grad(op, grad):
batch = tf.shape(relu_grad)[0]
reshaped_relu_grad = tf.reshape(relu_grad, [batch, -1])
relu_grad_mag = tf.norm(reshaped_relu_grad, axis=1)
- result_grad = tf.where(relu_grad_mag > 0., relu_grad, redirected_grad)
+ result_grad = tf.compat.v1.where(relu_grad_mag > 0., relu_grad, redirected_grad)

- global_step_t =tf.train.get_or_create_global_step()
+ global_step_t = tf.compat.v1.train.get_or_create_global_step()
return_relu_grad = tf.greater(global_step_t, tf.constant(16, tf.int64))

- return tf.where(return_relu_grad, relu_grad, result_grad)
+ return tf.compat.v1.where(return_relu_grad, relu_grad, result_grad)


def redirected_relu6_grad(op, grad):

@@ -120,15 +120,15 @@ def redirected_relu6_grad(op, grad):

# Compute ReLu gradient
relu6_cond = tf.logical_or(x < 0., x > 6.)
- relu_grad = tf.where(relu6_cond, tf.zeros_like(grad), grad)
+ relu_grad = tf.compat.v1.where(relu6_cond, tf.zeros_like(grad), grad)

# Compute redirected gradient: where do we need to zero out incoming gradient
# to prevent input going lower if its already negative, or going higher if
# already bigger than 6?
neg_pushing_lower = tf.logical_and(x < 0., grad > 0.)
pos_pushing_higher = tf.logical_and(x > 6., grad < 0.)
dir_filter = tf.logical_or(neg_pushing_lower, pos_pushing_higher)
- redirected_grad = tf.where(dir_filter, tf.zeros_like(grad), grad)
+ redirected_grad = tf.compat.v1.where(dir_filter, tf.zeros_like(grad), grad)

# Ensure we have at least a rank 2 tensor, as we expect a batch dimension
assert_op = tf.Assert(tf.greater(tf.rank(relu_grad), 1), [tf.rank(relu_grad)])

@@ -137,9 +137,9 @@ def redirected_relu6_grad(op, grad):
batch = tf.shape(relu_grad)[0]
reshaped_relu_grad = tf.reshape(relu_grad, [batch, -1])
relu_grad_mag = tf.norm(reshaped_relu_grad, axis=1)
- result_grad = tf.where(relu_grad_mag > 0., relu_grad, redirected_grad)
+ result_grad = tf.compat.v1.where(relu_grad_mag > 0., relu_grad, redirected_grad)

- global_step_t = tf.train.get_or_create_global_step()
+ global_step_t = tf.compat.v1.train.get_or_create_global_step()
return_relu_grad = tf.greater(global_step_t, tf.constant(16, tf.int64))

- return tf.where(return_relu_grad, relu_grad, result_grad)
+ return tf.compat.v1.where(return_relu_grad, relu_grad, result_grad)
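tf.compat.v1.where is kept here rather than TF2's tf.where, presumably because the v1 form accepts a rank-1 condition (one flag per batch example) that selects whole rows of a higher-rank tensor, which is how relu_grad_mag is used above. A small sketch of that behaviour (values are illustrative):

    import tensorflow as tf

    cond = tf.constant([True, False])        # one flag per example in the batch
    a = tf.ones([2, 3])
    b = tf.zeros([2, 3])
    rows = tf.compat.v1.where(cond, a, b)    # v1 semantics: selects entire rows
    # rows == [[1., 1., 1.], [0., 0., 0.]]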

lucid/misc/tfutil.py

Lines changed: 2 additions & 2 deletions
@@ -25,7 +25,7 @@ def create_session(target='', timeout_sec=10):
when having multiple python sessions sharing the same GPU.
'''
graph = tf.Graph()
- config = tf.ConfigProto()
+ config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.operation_timeout_in_ms = int(timeout_sec*1000)
- return tf.InteractiveSession(target=target, graph=graph, config=config)
+ return tf.compat.v1.InteractiveSession(target=target, graph=graph, config=config)
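Sessions are graph-mode constructs, so under TF2 this helper is expected to be used inside an explicit tf.Graph context, much like render.py does below. A hedged sketch of the equivalent setup (the timeout value is illustrative):

    import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():                       # graph-building mode, even under TF2
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True     # claim GPU memory incrementally
        config.operation_timeout_in_ms = 10000
        sess = tf.compat.v1.Session(graph=graph, config=config)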

lucid/modelzoo/other_models/Clip.py

Lines changed: 1 addition & 0 deletions
@@ -21,3 +21,4 @@ class Clip_ResNet50_4x(Model):
model_name = "Clip_ResNet50_4x"
image_shape = [288, 288, 3]
model_path = "gs://modelzoo/vision/other_models/Clip_ResNet50_4x.pb"
+ dataset = None

lucid/modelzoo/other_models/InceptionV1.py

Lines changed: 2 additions & 2 deletions
@@ -21,7 +21,7 @@

def _populate_inception_bottlenecks(scope):
"""Add Inception bottlenecks and their pre-Relu versions to the graph."""
- graph = tf.get_default_graph()
+ graph = tf.compat.v1.get_default_graph()
for op in graph.get_operations():
if op.name.startswith(scope+'/') and 'Concat' in op.type:
name = op.name.split('/')[1]

@@ -37,7 +37,7 @@ def _populate_inception_bottlenecks(scope):
class InceptionV1(Model):
"""InceptionV1 (or 'GoogLeNet')

- This is a (re?)implementation of InceptionV1 from the "Going deeper
+ This is a (re?)implementation of InceptionV1 from the "Going deeper
with convolutions" paper. Links:
* Official CVPR paper, requires subscription: https://ieeexplore.ieee.org/document/7298594
* Author preprint: https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf

lucid/modelzoo/util.py

Lines changed: 7 additions & 7 deletions
@@ -55,31 +55,31 @@ def forget_xy(t):
filter) when we only use early parts of it.
"""
shape = (t.shape[0], None, None, t.shape[3])
- return tf.placeholder_with_default(t, shape)
+ return tf.compat.v1.placeholder_with_default(t, shape)


def frozen_default_graph_def(input_node_names, output_node_names):
"""Return frozen and simplified graph_def of default graph."""

- sess = tf.get_default_session()
+ sess = tf.compat.v1.get_default_session()
if sess is None:
raise RuntimeError("Default session not registered.")

- input_graph_def = tf.get_default_graph().as_graph_def()
+ input_graph_def = tf.compat.v1.get_default_graph().as_graph_def()
if len(input_graph_def.node) == 0:
raise RuntimeError("Default graph is empty. Is it possible your model wasn't constructed or is in a different graph?")

- pruned_graph = tf.graph_util.remove_training_nodes(
+ pruned_graph = tf.compat.v1.graph_util.remove_training_nodes(
    input_graph_def, protected_nodes=(output_node_names + input_node_names)
)
- pruned_graph = tf.graph_util.extract_sub_graph(pruned_graph, output_node_names)
+ pruned_graph = tf.compat.v1.graph_util.extract_sub_graph(pruned_graph, output_node_names)

# remove explicit device assignments
for node in pruned_graph.node:
node.device = ""

- all_variable_names = [v.op.name for v in tf.global_variables()]
- output_graph_def = tf.graph_util.convert_variables_to_constants(
+ all_variable_names = [v.op.name for v in tf.compat.v1.global_variables()]
+ output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
    sess=sess,
    input_graph_def=pruned_graph,
    output_node_names=output_node_names,
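convert_variables_to_constants is the classic TF1 freezing path, still reachable through tf.compat.v1.graph_util. A minimal freezing sketch under those APIs (the variable and node names are illustrative):

    import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():
        w = tf.compat.v1.get_variable("w", initializer=tf.constant(3.0))
        out = tf.identity(w * 2.0, name="out")
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            frozen = tf.compat.v1.graph_util.convert_variables_to_constants(
                sess, graph.as_graph_def(), ["out"])   # variables baked in as Const nodes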

lucid/modelzoo/vision_base.py

Lines changed: 3 additions & 3 deletions
@@ -175,7 +175,7 @@ def post_import(self, scope):
def create_input(self, t_input=None, forget_xy_shape=True):
"""Create input tensor."""
if t_input is None:
- t_input = tf.placeholder(tf.float32, self.image_shape)
+ t_input = tf.compat.v1.placeholder(tf.float32, self.image_shape)
t_prep_input = t_input
if len(t_prep_input.shape) == 3:
t_prep_input = tf.expand_dims(t_prep_input, 0)

@@ -189,7 +189,7 @@ def create_input(self, t_input=None, forget_xy_shape=True):

def import_graph(self, t_input=None, scope='import', forget_xy_shape=True, input_map=None):
"""Import model GraphDef into the current graph."""
- graph = tf.get_default_graph()
+ graph = tf.compat.v1.get_default_graph()
assert graph.unique_name(scope, False) == scope, (
    'Scope "%s" already exists. Provide explicit scope names when '
    'importing multiple instances of the model.') % scope

@@ -231,7 +231,7 @@ def get_layer(self, name):
@staticmethod
def suggest_save_args(graph_def=None):
if graph_def is None:
- graph_def = tf.get_default_graph().as_graph_def()
+ graph_def = tf.compat.v1.get_default_graph().as_graph_def()
gdhelper = model_util.GraphDefHelper(graph_def)
inferred_info = dict.fromkeys(("input_name", "image_shape", "output_names", "image_value_range"))
node_shape = lambda n: [dim.size for dim in n.attr['shape'].shape.dim]

lucid/optvis/param/cppn.py

Lines changed: 4 additions & 3 deletions
@@ -18,7 +18,8 @@

import numpy as np
import tensorflow as tf
- from tensorflow.contrib import slim
+ # from tensorflow.contrib import slim
+ import tf_slim as slim


def _composite_activation(x, biased=True):

@@ -83,8 +84,8 @@ def cppn(
    [slim.conv2d],
    kernel_size=[1, 1],
    activation_fn=None,
-   weights_initializer=tf.initializers.variance_scaling(),
-   biases_initializer=tf.initializers.random_normal(0.0, 0.1),
+   weights_initializer=tf.compat.v1.initializers.variance_scaling(),
+   biases_initializer=tf.compat.v1.initializers.random_normal(0.0, 0.1),
):
for i in range(num_layers):
x = slim.conv2d(net, num_hidden_channels)
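tf.contrib was removed in TensorFlow 2.x, and slim now ships as the standalone tf-slim package (pip install tf-slim), which is what the new import relies on. A hedged import shim that would keep the module importable under both major versions (the commit itself simply switches over unconditionally):

    try:
        from tensorflow.contrib import slim   # TensorFlow 1.x
    except ImportError:
        import tf_slim as slim                # TensorFlow 2.x, via the tf-slim package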

lucid/optvis/param/random.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ def rand_fft_image(shape, sd=None, decay_power=1):
for _ in range(b):
freqs = rfft2d_freqs(h, w)
fh, fw = freqs.shape
- spectrum_var = sd * tf.random_normal([2, ch, fh, fw], dtype="float32")
+ spectrum_var = sd * tf.random.normal([2, ch, fh, fw], dtype="float32")
spectrum = tf.complex(spectrum_var[0], spectrum_var[1])
spertum_scale = 1.0 / np.maximum(freqs, 1.0 / max(h, w)) ** decay_power
# Scale the spectrum by the square-root of the number of pixels

lucid/optvis/param/resize_bilinear_nd.py

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ def resize_bilinear_nd(t, target_shape):
# We can then reshape and use the 2d tf.image.resize_bilinear() on the
# inner two dimesions.
t_ = tf.reshape(t, shape_)
- t_ = tf.image.resize_bilinear(t_, new_shape_[1:3])
+ t_ = tf.compat.v1.image.resize_bilinear(t_, new_shape_[1:3])

# And then reshape back to our uncollapsed version, having finished resizing
# two more dimensions in our shape.

lucid/optvis/param/spatial.py

Lines changed: 4 additions & 4 deletions
@@ -125,11 +125,11 @@ def bilinearly_sampled_image(texture, uv):
h, w = tf.unstack(tf.shape(texture)[:2])
u, v = tf.split(uv, 2, axis=-1)
v = 1.0 - v # vertical flip to match GL convention
- u, v = u * tf.to_float(w) - 0.5, v * tf.to_float(h) - 0.5
- u0, u1 = tf.floor(u), tf.ceil(u)
- v0, v1 = tf.floor(v), tf.ceil(v)
+ u, v = u * tf.cast(w, tf.float32) - 0.5, v * tf.cast(h, tf.float32) - 0.5
+ u0, u1 = tf.math.floor(u), tf.math.ceil(u)
+ v0, v1 = tf.math.floor(v), tf.math.ceil(v)
uf, vf = u - u0, v - v0
- u0, u1, v0, v1 = map(tf.to_int32, [u0, u1, v0, v1])
+ u0, u1, v0, v1 = map(lambda x: tf.cast(x, tf.int32), [u0, u1, v0, v1])

def sample(u, v):
vu = tf.concat([v % h, u % w], axis=-1)
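tf.to_float and tf.to_int32 are gone from the TF2 namespace; tf.cast is the generic replacement used here. A tiny sketch of the equivalence (values are illustrative):

    import tensorflow as tf

    x = tf.constant([1.7, -0.2])
    xi = tf.cast(x, tf.int32)       # replaces tf.to_int32(x); truncates toward zero -> [1, 0]
    xf = tf.cast(xi, tf.float32)    # replaces tf.to_float(xi)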

lucid/optvis/render.py

Lines changed: 10 additions & 9 deletions
@@ -86,16 +86,17 @@ def render_vis(model, objective_f, param_f=None, optimizer=None,
multiple channel visualizations stacked on top of each other.
"""

- with tf.Graph().as_default() as graph, tf.Session() as sess:
+ with tf.Graph().as_default() as graph, tf.compat.v1.Session() as sess:
+ # print(f'render vis graph: {graph}')

if use_fixed_seed: # does not mean results are reproducible, see Args doc
- tf.set_random_seed(0)
+ tf.compat.v1.set_random_seed(0)

T = make_vis_T(model, objective_f, param_f, optimizer, transforms,
               relu_gradient_override)
print_objective_func = make_print_objective_func(print_objectives, T)
loss, vis_op, t_image = T("loss"), T("vis_op"), T("input")
- tf.global_variables_initializer().run()
+ tf.compat.v1.global_variables_initializer().run()

images = []
try:

@@ -167,8 +168,8 @@ def make_vis_T(model, objective_f, param_f=None, optimizer=None,
transform_f = make_transform_f(transforms)
optimizer = make_optimizer(optimizer, [])

- global_step = tf.train.get_or_create_global_step()
- init_global_step = tf.variables_initializer([global_step])
+ global_step = tf.compat.v1.train.get_or_create_global_step()
+ init_global_step = tf.compat.v1.variables_initializer([global_step])
init_global_step.run()

if relu_gradient_override:

@@ -221,7 +222,7 @@ def make_t_image(param_f):
if not isinstance(t_image, tf.Tensor):
raise TypeError("param_f should produce a Tensor, but instead created a "
                + str(type(t_image)) )
- elif t_image.graph != tf.get_default_graph():
+ elif t_image.graph != tf.compat.v1.get_default_graph():
raise TypeError("""param_f produced a t_image tensor belonging to a graph
                that isn't the default graph for rendering. Did you
                accidentally use render_vis when you meant to use

@@ -239,15 +240,15 @@ def make_transform_f(transforms):

def make_optimizer(optimizer, args):
if optimizer is None:
- return tf.train.AdamOptimizer(0.05)
+ return tf.compat.v1.train.AdamOptimizer(0.05)
elif callable(optimizer):
return optimizer(*args)
- elif isinstance(optimizer, tf.train.Optimizer):
+ elif isinstance(optimizer, tf.compat.v1.train.Optimizer):
return optimizer
else:
raise ("Could not convert optimizer argument to usable optimizer. "
       "Needs to be one of None, function from (graph, sess) to "
-      "optimizer, or tf.train.Optimizer instance.")
+      "optimizer, or tf.compat.v1.train.Optimizer instance.")


def import_model(model, t_image, t_image_raw=None, scope="import", input_map=None):
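With these shims in place, the standard lucid workflow is expected to run on a TF2 runtime such as Colab, since render_vis builds its own tf.Graph and compat.v1.Session as shown above. A hedged end-to-end sketch (the imports and the objective string follow the lucid tutorial; behaviour beyond what this commit changes is an assumption):

    import lucid.modelzoo.vision_models as models
    import lucid.optvis.render as render

    model = models.InceptionV1()
    model.load_graphdef()                                      # fetch and cache the frozen GraphDef
    images = render.render_vis(model, "mixed4a_pre_relu:476")  # optimize a single channel objective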
