diff --git a/.gitignore b/.gitignore index 8ec9a5c7..5ac95191 100644 --- a/.gitignore +++ b/.gitignore @@ -20,4 +20,7 @@ __pycache__ *.png *.fig log -ae_output \ No newline at end of file +ae_output +*.bak +*.yaml +nncadTF2/ diff --git a/tensorflow/data_report.txt b/tensorflow/data_report.txt new file mode 100644 index 00000000..95dbc629 --- /dev/null +++ b/tensorflow/data_report.txt @@ -0,0 +1,52 @@ +TensorFlow 2.0 Upgrade Script +----------------------------- +Converted 5 files +Detected 0 issues that require attention +-------------------------------------------------------------------------------- +================================================================================ +Detailed log follows: + +================================================================================ +================================================================================ +Input tree: 'data_14/' +================================================================================ +-------------------------------------------------------------------------------- +Processing file 'data_14/cls_modelnet.py' + outputting to 'data/cls_modelnet.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'data_14/completion.py' + outputting to 'data/completion.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'data_14/midnet_data.py' + outputting to 'data/midnet_data.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + 
+-------------------------------------------------------------------------------- +Processing file 'data_14/seg_partnet.py' + outputting to 'data/seg_partnet.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'data_14/seg_shapenet.py' + outputting to 'data/seg_shapenet.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + diff --git a/tensorflow/libs/__init__.py b/tensorflow/libs/__init__.py index b4b8e638..6864f42a 100644 --- a/tensorflow/libs/__init__.py +++ b/tensorflow/libs/__init__.py @@ -10,7 +10,7 @@ tf_intk = tf.int64 else: print('INFO from ocnn: The octree key is 32 bits, ' - 'the octree depth should be smaller than 8. ') + 'the octree depth should be smaller than 8.') octree_key64 = False tf_uintk = tf.uint32 tf_uints = tf.uint8 @@ -164,29 +164,29 @@ def _OctreeMaskGrad(op, grad): @ops.RegisterGradient('OctreeGather') def _OctreeGatherGrad(op, grad): - shape = tf.shape(op.inputs[0]) + shape = tf.shape(input=op.inputs[0]) grad_out = octree_gatherbk(grad, op.inputs[1], shape) return [grad_out, None] def octree_max_pool(data, octree, depth): - with tf.variable_scope('octree_max_pool'): + with tf.compat.v1.variable_scope('octree_max_pool'): data, mask = _octree_max_pool(data, octree, depth) # the bottom data depth data = octree_pad(data, octree, depth-1) # !!! depth-1 return data, mask def octree_max_unpool(data, mask, octree, depth): - with tf.variable_scope('octree_max_unpool'): + with tf.compat.v1.variable_scope('octree_max_unpool'): data = octree_depad(data, octree, depth) # !!! 
depth - data = _octree_max_unpool(data, mask, octree, depth) # the bottom data depth + data = _octree_max_unpool(data, mask, octree, depth+1) # the bottom data depth rp** fix problem return data def octree_avg_pool(data, octree, depth): - with tf.variable_scope('octree_avg_pool'): + with tf.compat.v1.variable_scope('octree_avg_pool'): data = tf.reshape(data, [1, int(data.shape[1]), -1, 8]) - data = tf.reduce_mean(data, axis=3, keepdims=True) + data = tf.reduce_mean(input_tensor=data, axis=3, keepdims=True) data = octree_pad(data, octree, depth-1) # !!! depth-1 return data @@ -197,10 +197,10 @@ def octree_conv_fast(data, octree, depth, channel, kernel_size=[3], stride=1): for i in range(len(kernel_size), 3): kernel_size.append(kernel_size[-1]) - with tf.variable_scope('octree_conv'): + with tf.compat.v1.variable_scope('octree_conv'): dim = int(data.shape[1]) * kernel_size[0] * kernel_size[1] * kernel_size[2] - kernel = tf.get_variable('weights', shape=[channel, dim], dtype=tf.float32, - initializer=tf.contrib.layers.xavier_initializer()) + kernel = tf.compat.v1.get_variable('weights', shape=[int(channel), dim], dtype=tf.float32, + initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) col = octree2col(data, octree, depth, kernel_size, stride) col = tf.reshape(col, [dim, -1]) conv = tf.matmul(kernel, col) @@ -215,10 +215,10 @@ def octree_conv_memory(data, octree, depth, channel, kernel_size=[3], stride=1): for i in range(len(kernel_size), 3): kernel_size.append(kernel_size[-1]) - with tf.variable_scope('octree_conv'): + with tf.compat.v1.variable_scope('octree_conv'): dim = int(data.shape[1]) * kernel_size[0] * kernel_size[1] * kernel_size[2] - kernel = tf.get_variable('weights', shape=[channel, dim], dtype=tf.float32, - initializer=tf.contrib.layers.xavier_initializer()) + kernel = tf.compat.v1.get_variable('weights', shape=[int(channel), dim], dtype=tf.float32, + 
initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) conv = _octree_conv(data, kernel, octree, depth, channel, kernel_size, stride) if stride == 2: conv = octree_pad(conv, octree, depth-1) @@ -230,11 +230,11 @@ def octree_deconv_fast(data, octree, depth, channel, kernel_size=[3], stride=1): for i in range(len(kernel_size), 3): kernel_size.append(kernel_size[-1]) - with tf.variable_scope('octree_deconv'): + with tf.compat.v1.variable_scope('octree_deconv'): kernel_sdim = kernel_size[0] * kernel_size[1] * kernel_size[2] dim = channel * kernel_sdim - kernel = tf.get_variable('weights', shape=[int(data.shape[1]), dim], dtype=tf.float32, - initializer=tf.contrib.layers.xavier_initializer()) + kernel = tf.compat.v1.get_variable('weights', shape=[int(data.shape[1]), dim], dtype=tf.float32, + initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) if stride == 2: data = octree_depad(data, octree, depth) depth = depth + 1 @@ -250,11 +250,11 @@ def octree_deconv_memory(data, octree, depth, channel, kernel_size=[3], stride=1 for i in range(len(kernel_size), 3): kernel_size.append(kernel_size[-1]) - with tf.variable_scope('octree_deconv'): + with tf.compat.v1.variable_scope('octree_deconv'): kernel_sdim = kernel_size[0] * kernel_size[1] * kernel_size[2] dim = channel * kernel_sdim - kernel = tf.get_variable('weights', shape=[int(data.shape[1]), dim], dtype=tf.float32, - initializer=tf.contrib.layers.xavier_initializer()) + kernel = tf.compat.v1.get_variable('weights', shape=[int(data.shape[1]), dim], dtype=tf.float32, + initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) if stride == 2: data = octree_depad(data, octree, depth) deconv = _octree_deconv(data, kernel, octree, depth, channel, kernel_size, stride) @@ -264,14 +264,14 @@ def octree_deconv_memory(data, octree, depth, channel, kernel_size=[3], 
stride=1 def octree_full_voxel(data, depth): height = 2 ** (3 * depth) channel = int(data.shape[1]) - with tf.variable_scope('octree_full_voxel'): + with tf.compat.v1.variable_scope('octree_full_voxel'): data = tf.reshape(data, [channel, -1, height]) # (1, C, H, 1) -> (C, batch_size, H1) - data = tf.transpose(data, perm=[1, 0, 2]) + data = tf.transpose(a=data, perm=[1, 0, 2]) return data def octree_tile(data, octree, depth): - with tf.variable_scope('octree_tile'): + with tf.compat.v1.variable_scope('octree_tile'): data = octree_depad(data, octree, depth) # (1, C, H, 1) data = tf.tile(data, [1, 1, 1, 8]) # (1, C, H, 8) channel = int(data.shape[1]) @@ -280,39 +280,39 @@ def octree_tile(data, octree, depth): def octree_global_pool(data, octree, depth): - with tf.variable_scope('octree_global_pool'): + with tf.compat.v1.variable_scope('octree_global_pool'): segment_ids = octree_property(octree, property_name='index', dtype=tf.int32, depth=depth, channel=1) segment_ids = tf.reshape(segment_ids, [-1]) data = tf.squeeze(data, axis=[0, 3]) # (1, C, H, 1) -> (C, H) - data = tf.transpose(data) # (C, H) -> (H, C) + data = tf.transpose(a=data) # (C, H) -> (H, C) output = tf.math.segment_mean(data, segment_ids) # (H, C) -> (batch_size, C) return output def octree_bilinear_legacy(data, octree, depth, target_depth): - with tf.variable_scope('octree_bilinear'): + with tf.compat.v1.variable_scope('octree_bilinear'): mask = tf.constant( [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1]], dtype=tf.float32) index, fracs = _octree_bilinear(octree, depth, target_depth) - feat = tf.transpose(tf.squeeze(data, [0, 3])) # (1, C, H, 1) -> (H, C) - output = tf.zeros([tf.shape(index)[0], tf.shape(feat)[1]], dtype=tf.float32) - norm = tf.zeros([tf.shape(index)[0], 1], dtype=tf.float32) + feat = tf.transpose(a=tf.squeeze(data, [0, 3])) # (1, C, H, 1) -> (H, C) + output = tf.zeros([tf.shape(input=index)[0], tf.shape(input=feat)[1]], dtype=tf.float32) + norm 
= tf.zeros([tf.shape(input=index)[0], 1], dtype=tf.float32) for i in range(8): idxi = index[:, i] - weight = tf.abs(tf.reduce_prod(mask[i, :] - fracs, axis=1, keepdims=True)) + weight = tf.abs(tf.reduce_prod(input_tensor=mask[i, :] - fracs, axis=1, keepdims=True)) output += weight * tf.gather(feat, idxi) norm += weight * tf.expand_dims(tf.cast(idxi > -1, dtype=tf.float32), -1) - output = tf.div(output, norm) - output = tf.expand_dims(tf.expand_dims(tf.transpose(output), 0), -1) + output = tf.compat.v1.div(output, norm) + output = tf.expand_dims(tf.expand_dims(tf.transpose(a=output), 0), -1) return output # pts: (N, 4), i.e. N x (x, y, z, id) # data: (1, C, H, 1) def octree_bilinear_v1(pts, data, octree, depth): - with tf.variable_scope('octree_bilinear'): + with tf.compat.v1.variable_scope('octree_bilinear'): mask = tf.constant( [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]], dtype=tf.float32) @@ -322,9 +322,9 @@ def octree_bilinear_v1(pts, data, octree, depth): xyzi = tf.floor(xyzf) # the integer part frac = xyzf - xyzi # the fraction part - feat = tf.transpose(tf.squeeze(data, [0, 3])) # (1, C, H, 1) -> (H, C) - output = tf.zeros([tf.shape(xyzi)[0], tf.shape(feat)[1]], dtype=tf.float32) - norm = tf.zeros([tf.shape(xyzi)[0], 1], dtype=tf.float32) + feat = tf.transpose(a=tf.squeeze(data, [0, 3])) # (1, C, H, 1) -> (H, C) + output = tf.zeros([tf.shape(input=xyzi)[0], tf.shape(input=feat)[1]], dtype=tf.float32) + norm = tf.zeros([tf.shape(input=xyzi)[0], 1], dtype=tf.float32) for i in range(8): maski = mask[i, :] @@ -333,20 +333,20 @@ def octree_bilinear_v1(pts, data, octree, depth): xyzm = tf.cast(tf.concat([xyzm, ids], axis=1), dtype=tf_uints) idxi = octree_search_key(octree_encode_key(xyzm), octree, depth, is_xyz=True) - weight = tf.abs(tf.reduce_prod(maskc - frac, axis=1, keepdims=True)) + weight = tf.abs(tf.reduce_prod(input_tensor=maskc - frac, axis=1, keepdims=True)) output += weight * tf.gather(feat, idxi) norm += 
weight * tf.expand_dims(tf.cast(idxi > -1, dtype=tf.float32), -1) - output = tf.div(output, norm) + output = tf.compat.v1.div(output, norm) - output = tf.expand_dims(tf.expand_dims(tf.transpose(output), 0), -1) - frac = tf.expand_dims(tf.expand_dims(tf.transpose(frac), 0), -1) + output = tf.expand_dims(tf.expand_dims(tf.transpose(a=output), 0), -1) + frac = tf.expand_dims(tf.expand_dims(tf.transpose(a=frac), 0), -1) return output, frac # pts: (N, 4), i.e. N x (x, y, z, id) # data: (1, C, H, 1) def octree_bilinear_v2(pts, data, octree, depth): - with tf.variable_scope('octree_bilinear'): + with tf.compat.v1.variable_scope('octree_bilinear'): mask = tf.constant( [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]], dtype=tf.float32) @@ -356,8 +356,8 @@ def octree_bilinear_v2(pts, data, octree, depth): xyzi = tf.floor(xyzf) # the integer part frac = xyzf - xyzi # the fraction part - output = tf.zeros([1, tf.shape(data)[1], tf.shape(xyzi)[0], 1], dtype=tf.float32) - norm = tf.zeros([tf.shape(xyzi)[0], 1], dtype=tf.float32) + output = tf.zeros([1, tf.shape(input=data)[1], tf.shape(input=xyzi)[0], 1], dtype=tf.float32) + norm = tf.zeros([tf.shape(input=xyzi)[0], 1], dtype=tf.float32) for i in range(8): maski = mask[i, :] @@ -367,11 +367,11 @@ def octree_bilinear_v2(pts, data, octree, depth): # !!! Note some elements of idxi may be -1 idxi = octree_search_key(octree_encode_key(xyzm), octree, depth, is_xyz=True) - weight = tf.abs(tf.reduce_prod(maskc - frac, axis=1, keepdims=True)) + weight = tf.abs(tf.reduce_prod(input_tensor=maskc - frac, axis=1, keepdims=True)) # output += weight * tf.gather(data, idxi, axis=2) output += weight * octree_gather(data, idxi) norm += weight * tf.expand_dims(tf.cast(idxi > -1, dtype=tf.float32), -1) - output = tf.div(output, norm) + output = tf.compat.v1.div(output, norm) return output @@ -379,7 +379,7 @@ def octree_bilinear_v2(pts, data, octree, depth): # data: (1, C, H, 1) # !!! 
Note: the pts should be scaled into [0, 2^depth] def octree_bilinear_v3(pts, data, octree, depth): - with tf.variable_scope('octree_linear'): + with tf.compat.v1.variable_scope('octree_linear'): mask = tf.constant( [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]], dtype=tf.float32) @@ -406,21 +406,21 @@ def octree_bilinear_v3(pts, data, octree, depth): idx = octree_search_key(key, octree, depth) # (N*8,) flgs = idx > -1 # filtering flags - idx = tf.boolean_mask(idx, flgs) + idx = tf.boolean_mask(tensor=idx, mask=flgs) - npt = tf.shape(xyzi)[0] + npt = tf.shape(input=xyzi)[0] ids = tf.reshape(tf.range(npt), [-1, 1]) ids = tf.reshape(tf.tile(ids, [1, 8]), [-1]) # (N*8,) - ids = tf.boolean_mask(ids, flgs) + ids = tf.boolean_mask(tensor=ids, mask=flgs) frac = maskc - tf.expand_dims(frac, axis=1) - weight = tf.abs(tf.reshape(tf.reduce_prod(frac, axis=2), [-1])) - weight = tf.boolean_mask(weight, flgs) + weight = tf.abs(tf.reshape(tf.reduce_prod(input_tensor=frac, axis=2), [-1])) + weight = tf.boolean_mask(tensor=weight, mask=flgs) indices = tf.concat([tf.expand_dims(ids, 1), tf.expand_dims(idx, 1)], 1) indices = tf.cast(indices, tf.int64) data = tf.squeeze(data, [0, 3]) # (C, H) - h = tf.shape(data)[1] + h = tf.shape(input=data)[1] mat = tf.SparseTensor(indices=indices, values=weight, dense_shape=[npt, h]) # channel, max_channel = int(data.shape[0]), 512 @@ -443,18 +443,18 @@ def octree_bilinear_v3(pts, data, octree, depth): output = tf.sparse.sparse_dense_matmul(mat, data, adjoint_a=False, adjoint_b=True) norm = tf.sparse.sparse_dense_matmul(mat, tf.ones([h, 1])) - output = tf.div(output, norm + 1.0e-10) # avoid dividing by zeros - output = tf.expand_dims(tf.expand_dims(tf.transpose(output), 0), -1) + output = tf.compat.v1.div(output, norm + 1.0e-10) # avoid dividing by zeros + output = tf.expand_dims(tf.expand_dims(tf.transpose(a=output), 0), -1) return output def octree_bilinear(data, octree, depth, target_depth, mask=None): - 
with tf.name_scope('Octree_bilinear'): + with tf.compat.v1.name_scope('Octree_bilinear'): xyz = octree_property(octree, property_name='xyz', depth=target_depth, channel=1, dtype=tf_uintk) xyz = tf.reshape(xyz, [-1]) if mask is not None: - xyz = tf.boolean_mask(xyz, mask) + xyz = tf.boolean_mask(tensor=xyz, mask=mask) xyz = tf.cast(octree_decode_key(xyz), dtype=tf.float32) # Attention: displacement 0.5, scale @@ -469,7 +469,7 @@ def octree_bilinear(data, octree, depth, target_depth, mask=None): # pts: (N, 4), i.e. N x (x, y, z, id) # data: (1, C, H, 1) def octree_nearest_interp(pts, data, octree, depth): - with tf.variable_scope('octree_nearest_interp'): + with tf.compat.v1.variable_scope('octree_nearest_interp'): # The value is defined on the center of each voxel, # so we can get the closest grid point by simply casting the value to tf_uints pts = tf.cast(pts, dtype=tf_uints) @@ -488,7 +488,7 @@ def octree_nearest_interp(pts, data, octree, depth): def octree_signal(octree, depth, channel): - with tf.name_scope('octree_signal'): + with tf.compat.v1.name_scope('octree_signal'): signal = octree_property(octree, property_name='feature', dtype=tf.float32, depth=depth, channel=channel) signal = tf.reshape(signal, [1, channel, -1, 1]) @@ -496,7 +496,7 @@ def octree_signal(octree, depth, channel): def octree_xyz(octree, depth, decode=True): - with tf.name_scope('octree_xyz'): + with tf.compat.v1.name_scope('octree_xyz'): xyz = octree_property(octree, property_name='xyz', dtype=tf_uintk, depth=depth, channel=1) xyz = tf.reshape(xyz, [-1]) # uint32, N @@ -506,7 +506,7 @@ def octree_xyz(octree, depth, decode=True): def octree_child(octree, depth): - with tf.name_scope('octree_child'): + with tf.compat.v1.name_scope('octree_child'): child = octree_property(octree, property_name='child', dtype=tf.int32, depth=depth, channel=1) child = tf.reshape(child, [-1]) @@ -514,7 +514,7 @@ def octree_child(octree, depth): def octree_split(octree, depth): - with 
tf.name_scope('octree_split'): + with tf.compat.v1.name_scope('octree_split'): split = octree_property(octree, property_name='split', dtype=tf.float32, depth=depth, channel=1) split = tf.reshape(split, [-1]) diff --git a/tensorflow/libs/build.py b/tensorflow/libs/build.py index 08c1eaea..75b56453 100644 --- a/tensorflow/libs/build.py +++ b/tensorflow/libs/build.py @@ -3,12 +3,14 @@ import tensorflow as tf import subprocess import argparse +print(os.getcwd()) +#os.chdir("libs") parser = argparse.ArgumentParser() parser.add_argument("--octree", type=str, required=False, default='../../octree') parser.add_argument("--cuda", type=str, required=False, - default='/usr/local/cuda-10.1') + default='/usr/local/cuda-11.4') parser.add_argument('--key64', type=str, required=False, default='false') parser.add_argument('--cc', type=str, required=False, @@ -156,11 +158,11 @@ # make if os.path.exists("object"): os.system("rm -r object") - os.mkdir("object") +os.mkdir("object") os.system("make -j all") # test os.chdir(LIBS_DIR + "/../test") # env variable cmd = 'export OCTREE_KEY=' + ('64' if KEY64 == '-DKEY64' else '32') -os.system(cmd + " && python test_all.py") \ No newline at end of file +os.system(cmd + " && python test_all.py") diff --git a/tensorflow/libs/octree_batch_op.cc b/tensorflow/libs/octree_batch_op.cc index d4241809..65ddf6e1 100644 --- a/tensorflow/libs/octree_batch_op.cc +++ b/tensorflow/libs/octree_batch_op.cc @@ -25,7 +25,7 @@ class OctreeBatchOp : public OpKernel { void Compute(OpKernelContext* context) override { // input octrees const Tensor& data_in = context->input(0); - auto octree_buffer = data_in.flat(); + auto octree_buffer = data_in.flat(); // int batch_size = data_in.shape().dim_size(0); int batch_size = data_in.shape().num_elements(); vector octrees_in; diff --git a/tensorflow/libs/octree_samples.cc b/tensorflow/libs/octree_samples.cc index 25d9c894..c76745b5 100644 --- a/tensorflow/libs/octree_samples.cc +++ b/tensorflow/libs/octree_samples.cc @@ 
-31,12 +31,16 @@ class OctreeSamplesOp : public OpKernel { OP_REQUIRES_OK(context, context->allocate_output(0, names.shape(), &octrees)); for (int i = 0; i < num; ++i) { - string name = names.flat()(i); - string& oct = octrees->flat()(i); + string name = names.flat()(i); size_t size = 0; const char* str = (const char*)octree::get_one_octree(name.c_str(), &size); - oct.assign(str, str + size); + +// string& oct = (string&)octrees->flat()(i); +// oct.assign(str, str + size); + + tstring& oct = octrees->flat()(i); + oct.assign(str, size); //.assign(str, str + size); } } }; diff --git a/tensorflow/libs/points2octree_op.cc b/tensorflow/libs/points2octree_op.cc index d75f7e14..aac12706 100644 --- a/tensorflow/libs/points2octree_op.cc +++ b/tensorflow/libs/points2octree_op.cc @@ -53,7 +53,7 @@ class PointsToOctreeOp : public OpKernel { // init the points Points point_cloud_; - point_cloud_.set(data_in.flat()(0).data()); + point_cloud_.set(data_in.flat()(0).data()); // check the points string msg; @@ -75,8 +75,8 @@ class PointsToOctreeOp : public OpKernel { // output Tensor* out_data = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, data_in.shape(), &out_data)); - string& out_str = out_data->flat()(0); - out_str.assign(octree_buf.begin(), octree_buf.end()); + tstring& out_str = out_data->flat()(0); + out_str.assign(&octree_buf[0], octree_buf.size()); } private: diff --git a/tensorflow/libs/points_property_op.cc b/tensorflow/libs/points_property_op.cc index 12869259..a6810f4c 100644 --- a/tensorflow/libs/points_property_op.cc +++ b/tensorflow/libs/points_property_op.cc @@ -29,7 +29,7 @@ class PointsPropertyOp : public OpKernel { void Compute(OpKernelContext* context) override { // input points const Tensor& data_in = context->input(0); - auto points_buffer = data_in.flat(); + auto points_buffer = data_in.flat(); int batch_size = data_in.NumElements(); vector points_in(batch_size); for (int i = 0; i < batch_size; ++i) { diff --git 
a/tensorflow/libs/points_set_property_op.cc b/tensorflow/libs/points_set_property_op.cc index f1c65831..cea3ffb5 100644 --- a/tensorflow/libs/points_set_property_op.cc +++ b/tensorflow/libs/points_set_property_op.cc @@ -57,9 +57,9 @@ class PointsNewOp : public OpKernel { // output Tensor* tsr = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{1}, &tsr)); - string& out_str = tsr->flat()(0); + tstring& out_str = tsr->flat()(0); const vector& points_buf = point_cloud.get_buffer(); - out_str.assign(points_buf.begin(), points_buf.end()); + out_str.assign(&points_buf[0], points_buf.size()); } }; @@ -80,7 +80,7 @@ class PointsSetPropertyOp : public OpKernel { int channel = data.dim_size(1); // copy the data out of the input tensor - auto points_array = data_in.flat(); + auto points_array = data_in.flat(); vector points_buf(points_array(0).begin(), points_array(0).end()); // init the points @@ -117,8 +117,8 @@ class PointsSetPropertyOp : public OpKernel { Tensor* out_data = nullptr; const TensorShape& shape = data_in.shape(); OP_REQUIRES_OK(context, context->allocate_output(0, shape, &out_data)); - string& out_str = out_data->flat()(0); - out_str.assign(points_buf.begin(), points_buf.end()); + tstring& out_str = out_data->flat()(0); + out_str.assign(&points_buf[0], points_buf.size()); } private: diff --git a/tensorflow/libs/transform_octree_op.cc b/tensorflow/libs/transform_octree_op.cc index cda56955..4410024b 100644 --- a/tensorflow/libs/transform_octree_op.cc +++ b/tensorflow/libs/transform_octree_op.cc @@ -44,7 +44,7 @@ class OctreeDropOp : public OpKernel { void Compute(OpKernelContext* context) override { // input const Tensor& data_in = context->input(0); - const string& octree_in = data_in.flat()(0); + const string& octree_in = data_in.flat()(0); int depth = context->input(1).flat()(0); float ratio = context->input(2).flat()(0); @@ -55,8 +55,8 @@ class OctreeDropOp : public OpKernel { Tensor* data_out = nullptr; OP_REQUIRES_OK(context, 
context->allocate_output(0, data_in.shape(), &data_out)); - string& str_out = data_out->flat()(0); - str_out.assign(octree_out.begin(), octree_out.end()); + tstring& str_out = data_out->flat()(0); + str_out.assign(&octree_out[0], octree_out.size()); } }; @@ -70,7 +70,7 @@ class OctreeScanOp : public OpKernel { // input OctreeParser octree_in; const Tensor& data_in = context->input(0); - octree_in.set_cpu(data_in.flat()(0).data()); + octree_in.set_cpu(data_in.flat()(0).data()); const Tensor& axis_in = context->input(1); auto ptr_in = axis_in.flat().data(); @@ -84,8 +84,8 @@ class OctreeScanOp : public OpKernel { Tensor* data_out = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, data_in.shape(), &data_out)); - string& str_out = data_out->flat()(0); - str_out.assign(octree_out.begin(), octree_out.end()); + tstring& str_out = data_out->flat()(0); + str_out.assign(&octree_out[0], octree_out.size()); } protected: @@ -104,8 +104,8 @@ class OctreeCastOp : public OpKernel { // output Tensor* data_out = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, {1}, &data_out)); - string& str_out = data_out->flat()(0); - str_out.assign(ptr_in, ptr_in + data_in.NumElements()); + tstring& str_out = data_out->flat()(0); + str_out.assign(ptr_in, data_in.NumElements()); } }; diff --git a/tensorflow/libs/transform_points_op.cc b/tensorflow/libs/transform_points_op.cc index ce8583e8..ebd97326 100644 --- a/tensorflow/libs/transform_points_op.cc +++ b/tensorflow/libs/transform_points_op.cc @@ -90,7 +90,7 @@ class TransformPointsOp : public OpKernel { } // copy the data out of the input tensor - auto points_array = data_in.flat(); + auto points_array = data_in.flat(); vector points_buf(points_array(0).begin(), points_array(0).end()); // init the points @@ -178,8 +178,8 @@ class TransformPointsOp : public OpKernel { Tensor* out_data = nullptr; const TensorShape& shape = data_in.shape(); OP_REQUIRES_OK(context, context->allocate_output(0, shape, &out_data)); - string& 
out_str = out_data->flat()(0); - out_str.assign(points_buf.begin(), points_buf.end()); + tstring& out_str = out_data->flat()(0); + out_str.assign(&points_buf[0], points_buf.size()); } private: @@ -195,7 +195,7 @@ class NormalizePointsOp : public OpKernel { void Compute(OpKernelContext* context) override { // input - const string& data_in = context->input(0).flat()(0); + const string& data_in = context->input(0).flat()(0); const float radius = context->input(1).flat()(0); const float* center = context->input(2).flat().data(); @@ -234,8 +234,8 @@ class NormalizePointsOp : public OpKernel { Tensor* out_data = nullptr; const TensorShape& shape = context->input(0).shape(); OP_REQUIRES_OK(context, context->allocate_output(0, shape, &out_data)); - string& out_str = out_data->flat()(0); - out_str.assign(points_buf.begin(), points_buf.end()); + tstring& out_str = out_data->flat()(0); + out_str.assign(&points_buf[0], points_buf.size()); } }; @@ -251,7 +251,7 @@ class BoundingSphereOp : public OpKernel { // init the points Points pts; - pts.set(data_in.flat()(0).data()); + pts.set(data_in.flat()(0).data()); // check the points string msg; diff --git a/tensorflow/libs_report.txt b/tensorflow/libs_report.txt new file mode 100644 index 00000000..9c06d15e --- /dev/null +++ b/tensorflow/libs_report.txt @@ -0,0 +1,113 @@ +TensorFlow 2.0 Upgrade Script +----------------------------- +Converted 2 files +Detected 4 issues that require attention +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +File: libs_14/__init__.py +-------------------------------------------------------------------------------- +libs_14/__init__.py:202:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. 
You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +libs_14/__init__.py:220:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +libs_14/__init__.py:236:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +libs_14/__init__.py:256:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). 
+================================================================================ +Detailed log follows: + +================================================================================ +================================================================================ +Input tree: 'libs_14/' +================================================================================ +-------------------------------------------------------------------------------- +Processing file 'libs_14/build.py' + outputting to 'libs/build.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'libs_14/__init__.py' + outputting to 'libs/__init__.py' +-------------------------------------------------------------------------------- + +167:10: INFO: Added keywords to args of function 'tf.shape' +173:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +180:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +187:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +189:11: INFO: Added keywords to args of function 'tf.reduce_mean' +200:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +202:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +202:13: INFO: Renamed 'tf.get_variable' to 'tf.compat.v1.get_variable' +203:41: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. 
+ +218:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +220:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +220:13: INFO: Renamed 'tf.get_variable' to 'tf.compat.v1.get_variable' +221:41: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +233:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +236:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +236:13: INFO: Renamed 'tf.get_variable' to 'tf.compat.v1.get_variable' +237:41: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +253:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +256:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +256:13: INFO: Renamed 'tf.get_variable' to 'tf.compat.v1.get_variable' +257:41: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. 
+ +267:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +269:11: INFO: Added keywords to args of function 'tf.transpose' +274:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +283:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +288:11: INFO: Added keywords to args of function 'tf.transpose' +294:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +299:11: INFO: Added keywords to args of function 'tf.transpose' +300:23: INFO: Added keywords to args of function 'tf.shape' +300:43: INFO: Added keywords to args of function 'tf.shape' +301:23: INFO: Added keywords to args of function 'tf.shape' +304:22: INFO: Added keywords to args of function 'tf.reduce_prod' +307:13: INFO: Renamed 'tf.div' to 'tf.compat.v1.div' +308:43: INFO: Added keywords to args of function 'tf.transpose' +315:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +325:11: INFO: Added keywords to args of function 'tf.transpose' +326:23: INFO: Added keywords to args of function 'tf.shape' +326:42: INFO: Added keywords to args of function 'tf.shape' +327:23: INFO: Added keywords to args of function 'tf.shape' +336:22: INFO: Added keywords to args of function 'tf.reduce_prod' +339:13: INFO: Renamed 'tf.div' to 'tf.compat.v1.div' +341:43: INFO: Added keywords to args of function 'tf.transpose' +342:41: INFO: Added keywords to args of function 'tf.transpose' +349:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +359:26: INFO: Added keywords to args of function 'tf.shape' +359:45: INFO: Added keywords to args of function 'tf.shape' +360:23: INFO: Added keywords to args of function 'tf.shape' +370:22: INFO: Added keywords to args of function 'tf.reduce_prod' +374:13: INFO: Renamed 'tf.div' to 'tf.compat.v1.div' +382:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +409:10: INFO: Added keywords to args of function 'tf.boolean_mask' +411:10: INFO: Added keywords to args 
of function 'tf.shape' +414:10: INFO: Added keywords to args of function 'tf.boolean_mask' +417:31: INFO: Added keywords to args of function 'tf.reduce_prod' +418:13: INFO: Added keywords to args of function 'tf.boolean_mask' +423:8: INFO: Added keywords to args of function 'tf.shape' +446:13: INFO: Renamed 'tf.div' to 'tf.compat.v1.div' +447:43: INFO: Added keywords to args of function 'tf.transpose' +452:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +452:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +457:12: INFO: Added keywords to args of function 'tf.boolean_mask' +472:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +491:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +491:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +499:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +499:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +509:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +509:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +517:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. 
+ +517:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +-------------------------------------------------------------------------------- + diff --git a/tensorflow/script/config.py b/tensorflow/script/config.py index 2f64dc6b..e2d52075 100644 --- a/tensorflow/script/config.py +++ b/tensorflow/script/config.py @@ -53,6 +53,7 @@ _C.DATA.train.angle = (180, 180, 180) _C.DATA.train.location = '' # The data location +_C.DATA.train.xlocation = '' # The data location _C.DATA.train.shuffle = 1000 # The shuffle size _C.DATA.train.take = -1 # Use at most `take` elements from this dataset _C.DATA.train.batch_size = 32 # Training data batch size @@ -91,7 +92,7 @@ _C.LOSS.seg_num = 100 # The clustering number in MID training _C.LOSS.weights = (1.0, 1.0) # The weight factors for different losses _C.LOSS.label_smoothing = 0.0 # The factor of label smoothing - +_C.LOSS.infoGain = False # Use feq distribution # backup the commands _C.SYS = CN() diff --git a/tensorflow/script/configs/seg_hrnet_nncad_trn6.yaml b/tensorflow/script/configs/seg_hrnet_nncad_trn6.yaml new file mode 100644 index 00000000..3360b952 --- /dev/null +++ b/tensorflow/script/configs/seg_hrnet_nncad_trn6.yaml @@ -0,0 +1,51 @@ +# Parameters for the airplane + +SOLVER: + #ckpt: 'logs/seg/hrnetApr1/model/iter_150000.ckpt' + gpu: 0, + logdir: logs/seg/hrnetTF2 + max_iter: 130000 + #test_iter: 15070 + test_iter: 200 + test_every_iter: 2000 + step_size: (15000, ) + learning_rate: 0.1 + gamma: 0.316 + #run: test + run: train +DATA: + train: + dtype: octree + distort: False + depth: 6 + axis: y + uniform: True + node_dis: True + location: /mnt/f/bb/tfTrain.tfrecords + batch_size: 12 + x_alias: data + mask_ratio: 0.5 + + test: + dtype: octree + distort: False # no data augmentation + depth: 6 + axis: y + node_dis: True + location: /mnt/f/bb/tfTest.tfrecords + shuffle: 0 + batch_size: 1 + x_alias: data + +MODEL: + name: hrnet + channel: 4 + nout: 14 + depth: 6 + factor: 2 + signal_abs: True + depth_out: 6 + 
+LOSS: + num_class: 14 + weight_decay: 0.0005 diff --git a/tensorflow/script/configs/seg_hrnet_nncad_tst6.yaml b/tensorflow/script/configs/seg_hrnet_nncad_tst6.yaml new file mode 100644 index 00000000..a8e345f0 --- /dev/null +++ b/tensorflow/script/configs/seg_hrnet_nncad_tst6.yaml @@ -0,0 +1,51 @@ +# Parameters for the airplane + +SOLVER: + ckpt: 'logs/seg/hrnetDgnBx/model/iter_082000.ckpt' + gpu: 0, + logdir: logs/seg/hrnetDgnBx + max_iter: 130000 + test_iter: 0 + #test_iter: 200 + test_every_iter: 2000 + step_size: (15000, ) + learning_rate: 0.1 + gamma: 0.316 + run: test + #run: train +DATA: + train: + dtype: octree + distort: False + depth: 6 + axis: y + uniform: True + node_dis: True + location: /mnt/d/bb/tfTrain.tfrecords + batch_size: 12 + x_alias: data + mask_ratio: 0.5 + + test: + dtype: octree + distort: False # no data augmentation + depth: 6 + axis: y + node_dis: True + location: /mnt/d/aa/BASFlblbx6Fake/tfTest.tfrecords + shuffle: 0 + batch_size: 1 + x_alias: data + +MODEL: + name: hrnet + channel: 4 + nout: 14 + depth: 6 + factor: 2 + signal_abs: True + depth_out: 6 + +LOSS: + num_class: 14 + weight_decay: 0.0005 diff --git a/tensorflow/script/configs/seg_unet_nncad_tst.yaml b/tensorflow/script/configs/seg_unet_nncad_tst.yaml new file mode 100644 index 00000000..bcfa937a --- /dev/null +++ b/tensorflow/script/configs/seg_unet_nncad_tst.yaml @@ -0,0 +1,51 @@ +# Parameters for the airplane + +SOLVER: + ckpt: 'logs/seg/unetNTnormltst/model/iter_090000.ckpt' + gpu: 0, + logdir: logs/seg/unetNTnormltst + max_iter: 90000 + test_iter: 11387 + #test_iter: 200 + test_every_iter: 2000 + step_size: (15000, ) + learning_rate: 0.1 + gamma: 0.316 + run: test + #run: train +DATA: + train: + dtype: octree + distort: False + depth: 6 + axis: y + uniform: True + node_dis: True + location: /mnt/d/bb/tfTrain.tfrecords + batch_size: 24 + x_alias: data + mask_ratio: 0.5 + + test: + dtype: octree + distort: False # no data augmentation + depth: 6 + axis: y + node_dis: True 
+ location: /mnt/d/bbNynas/tfTest.tfrecords + shuffle: 0 + batch_size: 1 + x_alias: data + +MODEL: + name: unet + channel: 4 + nout: 14 + depth: 6 + factor: 2 + signal_abs: True + depth_out: 6 + +LOSS: + num_class: 14 + weight_decay: 0.0005 diff --git a/tensorflow/script/dataset.py b/tensorflow/script/dataset.py index f5ce73d3..f05c9a17 100644 --- a/tensorflow/script/dataset.py +++ b/tensorflow/script/dataset.py @@ -9,13 +9,26 @@ class ParseExample: def __init__(self, x_alias='data', y_alias='label', **kwargs): self.x_alias = x_alias self.y_alias = y_alias - self.features = { x_alias : tf.FixedLenFeature([], tf.string), - y_alias : tf.FixedLenFeature([], tf.int64) } + self.features = { self.x_alias : tf.io.FixedLenFeature([], tf.string), + self.y_alias : tf.io.FixedLenFeature([], tf.int64) } def __call__(self, record): - parsed = tf.parse_single_example(record, self.features) + parsed = tf.io.parse_single_example(serialized=record, features=self.features) return parsed[self.x_alias], parsed[self.y_alias] +class ParseExample2: + def __init__(self, x_alias='data', y_alias='label', **kwargs): + self.x_alias1 = 'data1' + self.x_alias2 = 'data2' + self.y_alias = y_alias + self.features = { self.x_alias1 : tf.io.FixedLenFeature([], tf.string), + self.x_alias2 : tf.io.FixedLenFeature([], tf.string), + y_alias : tf.io.FixedLenFeature([], tf.int64) } + + def __call__(self, record): + parsed = tf.io.parse_single_example(serialized=record, features=self.features) + return parsed[self.x_alias1],parsed[self.x_alias2], parsed[self.y_alias] + class Points2Octree: def __init__(self, depth, full_depth=2, node_dis=False, node_feat=False, @@ -114,7 +127,7 @@ def __init__(self, parse_example, normalize_points, transform_points, points2oct def __call__(self, record_names, batch_size, shuffle_size=1000, return_iter=False, take=-1, return_pts=False, **kwargs): - with tf.name_scope('points_dataset'): + with tf.compat.v1.name_scope('points_dataset'): def preprocess(record): points, label = 
self.parse_example(record) points = self.normalize_points(points) @@ -130,27 +143,46 @@ def merge_octrees(octrees, *args): dataset = tf.data.TFRecordDataset(record_names).take(take).repeat() if shuffle_size > 1: dataset = dataset.shuffle(shuffle_size) - itr = dataset.map(preprocess, num_parallel_calls=16) \ + itr = tf.compat.v1.data.make_one_shot_iterator(dataset.map(preprocess, num_parallel_calls=16) \ .batch(batch_size).map(merge_octrees, num_parallel_calls=8) \ - .prefetch(8).make_one_shot_iterator() + .prefetch(8)) return itr if return_iter else itr.get_next() - class OctreeDataset: def __init__(self, parse_example): self.parse_example = parse_example def __call__(self, record_names, batch_size, shuffle_size=1000, return_iter=False, take=-1, **kwargs): - with tf.name_scope('octree_dataset'): + with tf.compat.v1.name_scope('octree_dataset'): def merge_octrees(octrees, labels): return octree_batch(octrees), labels dataset = tf.data.TFRecordDataset(record_names).take(take).repeat() if shuffle_size > 1: dataset = dataset.shuffle(shuffle_size) - itr = dataset.map(self.parse_example, num_parallel_calls=8) \ + itr = tf.compat.v1.data.make_one_shot_iterator(dataset.map(self.parse_example, num_parallel_calls=8) \ + .batch(batch_size).map(merge_octrees, num_parallel_calls=8) \ + .prefetch(8)) + return itr if return_iter else itr.get_next() + +class OctreeDataset2: + def __init__(self, parse_example2): + self.parse_example2 = parse_example2 + + def __call__(self, record_names, batch_size, shuffle_size=1000, + return_iter=False, take=-1, **kwargs): + with tf.compat.v1.name_scope('octree_dataset'): + def merge_octrees(octrees1,octrees2, labels): + bo1=octree_batch(octrees1) + bo2=octree_batch(octrees2) + #tf.print(tf.shape(labels),tf.shape(bo1),tf.shape(bo2),"taking labels") + return bo1,bo2, labels + + dataset = tf.data.TFRecordDataset(record_names).take(take).repeat() + #if shuffle_size > 1: dataset = dataset.shuffle(shuffle_size) + itr = 
tf.compat.v1.data.make_one_shot_iterator(dataset.map(self.parse_example2, num_parallel_calls=8) \ .batch(batch_size).map(merge_octrees, num_parallel_calls=8) \ - .prefetch(8).make_one_shot_iterator() + .prefetch(8)) return itr if return_iter else itr.get_next() @@ -163,6 +195,8 @@ def __init__(self, flags, normalize_points=NormalizePoints, transform_points(**flags), Points2Octree(**flags)) elif flags.dtype == 'octree': self.dataset = OctreeDataset(ParseExample(**flags)) + elif flags.dtype == 'octree2': + self.dataset = OctreeDataset2(ParseExample2(**flags)) else: print('Error: unsupported datatype ' + flags.dtype) diff --git a/tensorflow/script/feature.py b/tensorflow/script/feature.py index c80d54dc..6209c2e4 100644 --- a/tensorflow/script/feature.py +++ b/tensorflow/script/feature.py @@ -17,7 +17,7 @@ octree, label = DatasetFactory(FLAGS.DATA.test)() hrnet = HRNet(FLAGS.MODEL) tensors = hrnet.network(octree, training=False) -with tf.variable_scope('signal'): +with tf.compat.v1.variable_scope('signal'): child = octree_property(octree, property_name='child', dtype=tf.int32, depth=FLAGS.DATA.test.depth, channel=1) child = tf.reshape(child, [-1]) @@ -72,11 +72,11 @@ def seg_features(sess): np.save(output_prefix + '_%03d.fc2.npy' % i, f) assert(FLAGSS.ckpt) -tf_saver = tf.train.Saver() -config = tf.ConfigProto() +tf_saver = tf.compat.v1.train.Saver() +config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True -with tf.Session(config=config) as sess: - tf.summary.FileWriter(FLAGSS.logdir, sess.graph) +with tf.compat.v1.Session(config=config) as sess: + tf.compat.v1.summary.FileWriter(FLAGSS.logdir, sess.graph) print('Restore from checkpoint: ', FLAGSS.ckpt) tf_saver.restore(sess, FLAGSS.ckpt) diff --git a/tensorflow/script/learning_rate.py b/tensorflow/script/learning_rate.py index 2ea7b67f..4779d11f 100644 --- a/tensorflow/script/learning_rate.py +++ b/tensorflow/script/learning_rate.py @@ -6,14 +6,14 @@ def __init__(self, flags): self.flags = flags 
def __call__(self, global_step): - with tf.variable_scope('cos_lr'): + with tf.compat.v1.variable_scope('cos_lr'): pi, mul = 3.1415926, 0.001 step_size = self.flags.step_size[0] max_iter = self.flags.max_iter * 0.9 max_epoch = max_iter / step_size lr_max = self.flags.learning_rate lr_min = self.flags.learning_rate * mul - epoch = tf.floordiv(tf.cast(global_step, tf.float32), step_size) + epoch = tf.math.floordiv(tf.cast(global_step, tf.float32), step_size) val = tf.minimum(epoch / max_epoch, 1.0) lr = lr_min + 0.5 * (lr_max - lr_min) * (1.0 + tf.cos(pi * val)) return lr @@ -24,17 +24,17 @@ def __init__(self, flags): self.flags = flags def __call__(self, global_step): - with tf.variable_scope('step_lr'): + with tf.compat.v1.variable_scope('step_lr'): step_size = list(self.flags.step_size) - for i in range(len(step_size), 5): + for i in range(len(step_size), 8): step_size.append(step_size[-1]) steps = step_size - for i in range(1, 5): + for i in range(1, 8): steps[i] = steps[i-1] + steps[i] - lr_values = [self.flags.gamma**i * self.flags.learning_rate for i in range(0, 6)] + lr_values = [self.flags.gamma**i * self.flags.learning_rate for i in range(0, 9)] - lr = tf.train.piecewise_constant(global_step, steps, lr_values) + lr = tf.compat.v1.train.piecewise_constant(global_step, steps, lr_values) return lr diff --git a/tensorflow/script/mid_loss.py b/tensorflow/script/mid_loss.py index c1e490aa..cdb202ad 100644 --- a/tensorflow/script/mid_loss.py +++ b/tensorflow/script/mid_loss.py @@ -7,22 +7,22 @@ def __init__(self, flags, reuse=False): self.reuse = reuse def _def_memory(self, channel): - with tf.variable_scope('shape_memory'): - self.memory = tf.get_variable('memory', + with tf.compat.v1.variable_scope('shape_memory'): + self.memory = tf.compat.v1.get_variable('memory', shape=[self.flags.inst_num, channel], trainable=False, - initializer=tf.contrib.layers.xavier_initializer()) + initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", 
distribution="uniform")) def forward(self, feature): - with tf.variable_scope('shape_cls', reuse=self.reuse): + with tf.compat.v1.variable_scope('shape_cls', reuse=self.reuse): self._def_memory(int(feature.shape[1])) self.feature = tf.nn.l2_normalize(feature, axis=1) logit = tf.matmul(self.feature, self.memory, transpose_a=False, transpose_b=True) - logit = tf.div(logit, self.flags.sigma) + logit = tf.compat.v1.div(logit, self.flags.sigma) return logit def loss(self, logit, shape_id): self.shape_id = shape_id # this is the ground-truth label - with tf.name_scope('shape_loss'): + with tf.compat.v1.name_scope('shape_loss'): loss = softmax_loss(logit, self.shape_id, self.flags.inst_num) accu = softmax_accuracy(logit, self.shape_id) return loss, accu @@ -30,23 +30,23 @@ def loss(self, logit, shape_id): def update_memory(self, solver): # update memory bank after solver with tf.control_dependencies([solver]): - with tf.name_scope('update_shape_memory'): + with tf.compat.v1.name_scope('update_shape_memory'): momentum = self.flags.momentum weight = tf.gather(self.memory, self.shape_id) weight = self.feature * momentum + weight * (1 - momentum) weight = tf.nn.l2_normalize(weight, 1) - memory = tf.scatter_update(self.memory, self.shape_id, weight) + memory = tf.compat.v1.scatter_update(self.memory, self.shape_id, weight) return memory def knn_accuracy(self, logit, label_test, label_train, class_num=10, K=200): - with tf.name_scope('knn_accu'): + with tf.compat.v1.name_scope('knn_accu'): one_hot_train = tf.one_hot(label_train, depth=class_num) top_k_values, top_k_indices = tf.nn.top_k(logit, k=K) # k nearest points top_k_label = tf.gather(one_hot_train, top_k_indices) # gather label weight = tf.expand_dims(tf.exp(top_k_values), axis=-1) # predict weighted_label = tf.multiply(top_k_label, weight) - sum_up_predictions = tf.reduce_sum(weighted_label, axis=1) - label_pred = tf.argmax(sum_up_predictions, axis=1) + sum_up_predictions = tf.reduce_sum(input_tensor=weighted_label, 
axis=1) + label_pred = tf.argmax(input=sum_up_predictions, axis=1) accu = label_accuracy(label_pred, label_test) return accu @@ -57,22 +57,22 @@ def __init__(self, flags, reuse=False): self.reuse = reuse def _def_memory(self, channel): - with tf.variable_scope('point_memory'): - self.memory = tf.get_variable('memory', trainable=False, + with tf.compat.v1.variable_scope('point_memory'): + self.memory = tf.compat.v1.get_variable('memory', trainable=False, shape=[self.flags.inst_num, self.flags.seg_num, channel], - initializer=tf.contrib.layers.xavier_initializer()) + initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) def forward(self, feature, shape_id, obj_segment, batch_size): self.shape_id = shape_id self.obj_segment = obj_segment self.batch_size = batch_size - with tf.variable_scope('point_cls', reuse=self.reuse): + with tf.compat.v1.variable_scope('point_cls', reuse=self.reuse): self._def_memory(int(feature.shape[1])) self.feature = tf.nn.l2_normalize(feature, axis=1) # split the feature - node_nums = tf.segment_sum(tf.ones_like(obj_segment), obj_segment) + node_nums = tf.math.segment_sum(tf.ones_like(obj_segment), obj_segment) node_nums = tf.reshape(node_nums, [self.batch_size]) features = tf.split(self.feature, node_nums) @@ -84,13 +84,13 @@ def forward(self, feature, shape_id, obj_segment, batch_size): # logit logit = tf.concat(out, axis=0) - logit = tf.div(logit, self.flags.sigma) + logit = tf.compat.v1.div(logit, self.flags.sigma) return logit def loss(self, logit, point_id): self.point_id = point_id - with tf.name_scope('point_loss'): + with tf.compat.v1.name_scope('point_loss'): # point_mask = point_id > -1 # filter label -1 # logit = tf.boolean_mask(logit, point_mask) # point_id = tf.boolean_mask(point_id, point_mask) @@ -102,7 +102,7 @@ def loss(self, logit, point_id): def update_memory(self, solver): # update memory bank after solver with tf.control_dependencies([solver]): - with 
tf.name_scope('update_point_memory'): + with tf.compat.v1.name_scope('update_point_memory'): feature = self.feature seg_num, point_id = self.flags.seg_num, self.point_id # point_mask = point_id > -1 # filter label -1 @@ -111,7 +111,7 @@ def update_memory(self, solver): # feature = tf.boolean_mask(feature, point_mask) batch_size = self.batch_size - feature = tf.unsorted_segment_mean(feature, point_id, seg_num*batch_size) + feature = tf.math.unsorted_segment_mean(feature, point_id, seg_num*batch_size) feature = tf.nn.l2_normalize(feature, axis=1) feature = tf.reshape(feature, [batch_size, seg_num, -1]) @@ -119,5 +119,5 @@ def update_memory(self, solver): weight = tf.gather(self.memory, self.shape_id) weight = feature * momentum + weight * (1 - momentum) weight = tf.nn.l2_normalize(weight, axis=2) - memory = tf.scatter_update(self.memory, self.shape_id, weight) + memory = tf.compat.v1.scatter_update(self.memory, self.shape_id, weight) return memory diff --git a/tensorflow/script/network_ae.py b/tensorflow/script/network_ae.py index 188767a8..8c50600b 100644 --- a/tensorflow/script/network_ae.py +++ b/tensorflow/script/network_ae.py @@ -10,21 +10,21 @@ def octree_encoder(self, octree, training, reuse=False): flags = self.flags depth, nout = flags.depth, flags.nout channel = [4, nout, 128, 64, 32, 16, 8] - with tf.variable_scope('ocnn_encoder', reuse=reuse): - with tf.variable_scope('signal_gt'): + with tf.compat.v1.variable_scope('ocnn_encoder', reuse=reuse): + with tf.compat.v1.variable_scope('signal_gt'): data = octree_property(octree, property_name="feature", dtype=tf.float32, depth=depth, channel=flags.channel) data = tf.reshape(data, [1, flags.channel, -1, 1]) for d in range(depth, 1, -1): - with tf.variable_scope('depth_%d' % d): + with tf.compat.v1.variable_scope('depth_%d' % d): data = octree_conv_bn_relu(data, octree, d, channel[d], training) data, _ = octree_max_pool(data, octree, d) - with tf.variable_scope('depth_1'): + with 
tf.compat.v1.variable_scope('depth_1'): data = downsample(data, channel[1], training) - with tf.variable_scope('code'): + with tf.compat.v1.variable_scope('code'): code = conv2d_bn(data, channel[1], kernel_size=1, stride=1, training=training) code = tf.nn.tanh(code) return code @@ -33,39 +33,39 @@ def octree_decoder(self, code, octree, training, reuse=False): flags = self.flags depth = flags.depth channel = [512, 256, 128, 64, 32, 16, 8] - with tf.variable_scope('ocnn_decoder', reuse=reuse): + with tf.compat.v1.variable_scope('ocnn_decoder', reuse=reuse): label_gt = [None]*10 - with tf.variable_scope('label_gt'): + with tf.compat.v1.variable_scope('label_gt'): for d in range(2, depth + 1): label = octree_property(octree, property_name="split", dtype=tf.float32, depth=d, channel=1) label_gt[d] = tf.reshape(tf.cast(label, dtype=tf.int32), [-1]) - with tf.variable_scope('signal_gt'): + with tf.compat.v1.variable_scope('signal_gt'): signal_gt = octree_property(octree, property_name="feature", dtype=tf.float32, depth=depth, channel=flags.channel) signal_gt = tf.reshape(signal_gt, [1, flags.channel, -1, 1]) data = code - with tf.variable_scope('depth_1'): + with tf.compat.v1.variable_scope('depth_1'): data = upsample(data, channel[1], training) loss = []; accu = []; for d in range(2, depth + 1): - with tf.variable_scope('depth_%d' % d): + with tf.compat.v1.variable_scope('depth_%d' % d): data = octree_upsample(data, octree, d-1, channel[d], training) data = octree_conv_bn_relu(data, octree, d, channel[d], training) - with tf.variable_scope('predict_%d' % d): + with tf.compat.v1.variable_scope('predict_%d' % d): logit, label = predict_label(data, 2, 32, training) - with tf.variable_scope('loss_%d' % d): - logit = tf.transpose(tf.squeeze(logit, [0,3])) # (1, C, H, 1) -> (H, C) + with tf.compat.v1.variable_scope('loss_%d' % d): + logit = tf.transpose(a=tf.squeeze(logit, [0,3])) # (1, C, H, 1) -> (H, C) loss.append(softmax_loss(logit, label_gt[d], num_class=2)) 
accu.append(label_accuracy(label, label_gt[d])) if d == depth: - with tf.variable_scope('regress_%d' % d): + with tf.compat.v1.variable_scope('regress_%d' % d): signal = predict_signal(data, flags.channel, 32, training) loss.append(regress_loss(signal, signal_gt)) @@ -75,39 +75,39 @@ def octree_decode_shape(self, code, training, reuse=False): flags = self.flags depth = flags.depth channel = [512, 256, 128, 64, 32, 16, 8] - with tf.variable_scope('ocnn_decoder', reuse=reuse): - with tf.variable_scope('octree_0'): + with tf.compat.v1.variable_scope('ocnn_decoder', reuse=reuse): + with tf.compat.v1.variable_scope('octree_0'): displace = False if flags.channel < 4 else True octree = octree_new(batch_size=1, channel=flags.channel, has_displace=displace) - with tf.variable_scope('octree_1'): + with tf.compat.v1.variable_scope('octree_1'): octree = octree_grow(octree, target_depth=1, full_octree=True) - with tf.variable_scope('octree_2'): + with tf.compat.v1.variable_scope('octree_2'): octree = octree_grow(octree, target_depth=2, full_octree=True) data = code - with tf.variable_scope('depth_1'): + with tf.compat.v1.variable_scope('depth_1'): data = upsample(data, channel[1], training) for d in range(2, depth + 1): - with tf.variable_scope('depth_%d' % d): + with tf.compat.v1.variable_scope('depth_%d' % d): data = octree_upsample(data, octree, d-1, channel[d], training) data = octree_conv_bn_relu(data, octree, d, channel[d], training) - with tf.variable_scope('predict_%d' % d): + with tf.compat.v1.variable_scope('predict_%d' % d): _, label = predict_label(data, 2, 32, training) - with tf.variable_scope('octree_%d' % d, reuse=True): + with tf.compat.v1.variable_scope('octree_%d' % d, reuse=True): octree = octree_update(octree, label, depth=d, mask=1) # octree = octree_update(octree, label_gt[d], depth=d, mask=1) if d < depth: - with tf.variable_scope('octree_%d' % (d+1)): + with tf.compat.v1.variable_scope('octree_%d' % (d+1)): octree = octree_grow(octree, target_depth=d+1, 
full_octree=False) else: - with tf.variable_scope('regress_%d' % d): + with tf.compat.v1.variable_scope('regress_%d' % d): signal = predict_signal(data, flags.channel, 32, training) signal = normalize_signal(signal) signal = octree_mask(signal, label, mask=0) - with tf.variable_scope('octree_%d' % d, reuse=True): + with tf.compat.v1.variable_scope('octree_%d' % d, reuse=True): octree = octree_set_property(octree, signal, property_name="feature", depth=depth) return octree @@ -119,24 +119,24 @@ def octree_encoder(self, octree, training, reuse=False): flags = self.flags depth, nout = flags.depth, flags.nout channels = [4, nout, 256, 256, 128, 64, 32, 16] - with tf.variable_scope('ocnn_encoder', reuse=reuse): - with tf.variable_scope('signal_gt'): + with tf.compat.v1.variable_scope('ocnn_encoder', reuse=reuse): + with tf.compat.v1.variable_scope('signal_gt'): data = octree_property(octree, property_name="feature", dtype=tf.float32, depth=depth, channel=flags.channel) data = tf.reshape(data, [1, flags.channel, -1, 1]) - with tf.variable_scope("front"): + with tf.compat.v1.variable_scope("front"): data = octree_conv_bn_relu(data, octree, depth, channels[depth], training) for d in range(depth, 2, -1): for i in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d_%d' % (d, i)): + with tf.compat.v1.variable_scope('resblock_%d_%d' % (d, i)): data = octree_resblock(data, octree, d, channels[d], 1, training) - with tf.variable_scope('down_%d' % d): + with tf.compat.v1.variable_scope('down_%d' % d): data = octree_conv_bn_relu(data, octree, d, channels[d-1], training, stride=2, kernel_size=[2]) - with tf.variable_scope('code'): + with tf.compat.v1.variable_scope('code'): # code = conv2d_bn(data, channels[1], kernel_size=1, stride=1, training=training) code = octree_conv1x1_bn(data, flags.nout, training=training) code = tf.nn.tanh(code) @@ -146,20 +146,20 @@ def octree_decoder(self, code, octree, training, reuse=False): flags = self.flags depth = flags.depth 
channels = [4, 64, 256, 256, 128, 64, 32, 16] - with tf.variable_scope('ocnn_decoder', reuse=reuse): + with tf.compat.v1.variable_scope('ocnn_decoder', reuse=reuse): data = code loss, accu = [], [] for d in range(2, depth + 1): for i in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d_%d' % (d, i)): + with tf.compat.v1.variable_scope('resblock_%d_%d' % (d, i)): data = octree_resblock(data, octree, d, channels[d], 1, training) - with tf.variable_scope('predict_%d' % d): + with tf.compat.v1.variable_scope('predict_%d' % d): logit, label = predict_label(data, 2, 32, training) - logit = tf.transpose(tf.squeeze(logit, [0,3])) # (1, C, H, 1) -> (H, C) + logit = tf.transpose(a=tf.squeeze(logit, [0,3])) # (1, C, H, 1) -> (H, C) - with tf.variable_scope('loss_%d' % d): - with tf.variable_scope('label_gt'): + with tf.compat.v1.variable_scope('loss_%d' % d): + with tf.compat.v1.variable_scope('label_gt'): label_gt = octree_property(octree, property_name="split", dtype=tf.float32, depth=d, channel=1) label_gt = tf.reshape(tf.cast(label_gt, dtype=tf.int32), [-1]) @@ -167,18 +167,18 @@ def octree_decoder(self, code, octree, training, reuse=False): accu.append(label_accuracy(label, label_gt)) if d == depth: - with tf.variable_scope('regress_%d' % d): + with tf.compat.v1.variable_scope('regress_%d' % d): signal = predict_signal(data, flags.channel, 32, training) - with tf.variable_scope('loss_regress'): - with tf.variable_scope('signal_gt'): + with tf.compat.v1.variable_scope('loss_regress'): + with tf.compat.v1.variable_scope('signal_gt'): signal_gt = octree_property(octree, property_name="feature", dtype=tf.float32, depth=depth, channel=flags.channel) signal_gt = tf.reshape(signal_gt, [1, flags.channel, -1, 1]) loss.append(regress_loss(signal, signal_gt)) if d < depth: - with tf.variable_scope('up_%d' % d): + with tf.compat.v1.variable_scope('up_%d' % d): data = octree_deconv_bn_relu(data, octree, d, channels[d-1], training, stride=2, kernel_size=[2]) return 
loss, accu @@ -187,39 +187,39 @@ def octree_decode_shape(self, code, training, reuse=False): flags = self.flags depth = flags.depth channels = [4, 64, 256, 256, 128, 64, 32, 16] - with tf.variable_scope('ocnn_decoder', reuse=reuse): - with tf.variable_scope('octree_0'): + with tf.compat.v1.variable_scope('ocnn_decoder', reuse=reuse): + with tf.compat.v1.variable_scope('octree_0'): displace = False if flags.channel < 4 else True octree = octree_new(batch_size=1, channel=flags.channel, has_displace=displace) - with tf.variable_scope('octree_1'): + with tf.compat.v1.variable_scope('octree_1'): octree = octree_grow(octree, target_depth=1, full_octree=True) - with tf.variable_scope('octree_2'): + with tf.compat.v1.variable_scope('octree_2'): octree = octree_grow(octree, target_depth=2, full_octree=True) data = code for d in range(2, depth + 1): for i in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d_%d' % (d, i)): + with tf.compat.v1.variable_scope('resblock_%d_%d' % (d, i)): data = octree_resblock(data, octree, d, channels[d], 1, training) - with tf.variable_scope('predict_%d' % d): + with tf.compat.v1.variable_scope('predict_%d' % d): _, label = predict_label(data, 2, 32, training) - with tf.variable_scope('octree_%d' % d, reuse=True): + with tf.compat.v1.variable_scope('octree_%d' % d, reuse=True): octree = octree_update(octree, label, depth=d, mask=1) if d < depth: - with tf.variable_scope('octree_%d' % (d+1)): + with tf.compat.v1.variable_scope('octree_%d' % (d+1)): octree = octree_grow(octree, target_depth=d+1, full_octree=False) else: - with tf.variable_scope('regress_%d' % d): + with tf.compat.v1.variable_scope('regress_%d' % d): signal = predict_signal(data, flags.channel, 32, training) signal = normalize_signal(signal) signal = octree_mask(signal, label, mask=0) - with tf.variable_scope('octree_%d' % d, reuse=True): + with tf.compat.v1.variable_scope('octree_%d' % d, reuse=True): octree = octree_set_property(octree, signal, 
property_name="feature", depth=depth) if d < depth: - with tf.variable_scope('up_%d' % d): + with tf.compat.v1.variable_scope('up_%d' % d): data = octree_deconv_bn_relu(data, octree, d, channels[d-1], training, stride=2, kernel_size=[2]) return octree diff --git a/tensorflow/script/network_aenet.py b/tensorflow/script/network_aenet.py new file mode 100644 index 00000000..7fd80dcd --- /dev/null +++ b/tensorflow/script/network_aenet.py @@ -0,0 +1,76 @@ +import tensorflow as tf +from ocnn import * + + +def network_aenet(octree, flags, training, reuse=False): + + depth, nout = flags.depth, flags.nout + channel = [None, None, None, 256, 128, 32, 16] + cout=[None, None, 256, 128, 32, 16, 4] + + with tf.compat.v1.variable_scope('ocnn_unet', reuse=reuse): + with tf.compat.v1.variable_scope('signal'): + data = octree_property(octree, property_name='feature', dtype=tf.float32, + depth=depth, channel=flags.channel) + data = tf.reshape(data, [1, flags.channel, -1, 1]) + + ## encoder + mask = [None]*10 + for d in range(depth, 2, -1): + #for d in range(depth, 5, -1): + with tf.compat.v1.variable_scope('encoder_d%d' % d): + # data=tf.Print(data,[tf.shape(data)],"step a",summarize=10) + data = octree_conv_bn_relu(data, octree, d, channel[d], training) + if d==5: + data = tf.compat.v1.layers.dropout(data, rate=0.5, training=training) + # data=tf.Print(data,[tf.shape(data)],"step b",summarize=10) + data,mask[d] = octree_max_pool(data, octree, d) + # data=tf.Print(data,[tf.shape(data)],"step c",summarize=10) + + + ## decoder + assert d != 2 ,print("trouble 1") + data = octree_conv_bn_relu(data, octree, 2, 256, training) + data = tf.compat.v1.layers.dropout(data, rate=0.5, training=training) + + for d in range(3, depth + 1): + #for d in range(6, depth + 1): + with tf.compat.v1.variable_scope('decoder_d%d' % (d-1)): + # mask[d]=tf.Print(mask[d],[tf.shape(mask[d])],"step mask",summarize=10) + # data=tf.Print(data,[tf.shape(data)],"step 1",summarize=10) + data= octree_max_unpool(data, 
mask[d], octree, d-1) + #data=tf.Print(data,[tf.shape(data)],"step 2",summarize=10) + data = octree_conv_bn_relu(data, octree, d, cout[d], training) + if d==5: + data = tf.compat.v1.layers.dropout(data, rate=0.5, training=training) + #data=tf.Print(data,[tf.shape(data)],"step 3",summarize=10) + + # segmentation + if d == depth: + with tf.compat.v1.variable_scope('predict_label'): + logit = predict_module(data, flags.nout, 64, training) + logit = tf.transpose(a=tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) + + return logit + + # downsampling + # dd = d if d == depth else d + 1 + # stride = 1 if d == depth else 2 + # kernel_size = [3] if d == depth else [2] + # convd[d] = octree_conv_bn_relu(convd[d+1], octree, dd, nout[d], training, + # stride=stride, kernel_size=kernel_size) + # # resblock + # for n in range(0, flags.resblock_num): + # with tf.variable_scope('resblock_%d' % n): + # convd[d] = octree_resblock(convd[d], octree, d, nout[d], 1, training) + # upsampling + # deconv = octree_tile(deconv, d-1, d, octree=octree1) + # deconv = octree_upsample(deconv, octree, d-1, nout[d], training) + # deconv = octree_deconv_bn_relu(deconv, octree, d-1, nout[d], training, + # kernel_size=[2], stride=2, fast_mode=False) + # deconv = convd[d] + deconv # skip connections + + # # resblock + # for n in range(0, flags.resblock_num): + # with tf.variable_scope('resblock_%d' % n): + # deconv = octree_resblock(deconv, octree, d, nout[d], 1, training) \ No newline at end of file diff --git a/tensorflow/script/network_cls.py b/tensorflow/script/network_cls.py index 87f57c57..0776dcf4 100644 --- a/tensorflow/script/network_cls.py +++ b/tensorflow/script/network_cls.py @@ -6,29 +6,29 @@ def network_resnet(octree, flags, training=True, reuse=None): depth = flags.depth channels = [2048, 1024, 512, 256, 128, 64, 32, 16, 8] - with tf.variable_scope("ocnn_resnet", reuse=reuse): + with tf.compat.v1.variable_scope("ocnn_resnet", reuse=reuse): data = octree_property(octree, 
property_name="feature", dtype=tf.float32, depth=depth, channel=flags.channel) data = tf.reshape(data, [1, flags.channel, -1, 1]) - with tf.variable_scope("conv1"): + with tf.compat.v1.variable_scope("conv1"): data = octree_conv_bn_relu(data, octree, depth, channels[depth], training) for d in range(depth, 2, -1): for i in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d_%d' % (d, i)): + with tf.compat.v1.variable_scope('resblock_%d_%d' % (d, i)): data = octree_resblock(data, octree, d, channels[d], 1, training) - with tf.variable_scope('max_pool_%d' % d): + with tf.compat.v1.variable_scope('max_pool_%d' % d): data, _ = octree_max_pool(data, octree, d) - with tf.variable_scope("global_average"): + with tf.compat.v1.variable_scope("global_average"): data = octree_full_voxel(data, depth=2) - data = tf.reduce_mean(data, 2) + data = tf.reduce_mean(input_tensor=data, axis=2) if flags.dropout[0]: - data = tf.layers.dropout(data, rate=0.5, training=training) + data = tf.compat.v1.layers.dropout(data, rate=0.5, training=training) - with tf.variable_scope("fc2"): + with tf.compat.v1.variable_scope("fc2"): logit = dense(data, flags.nout, use_bias=True) return logit @@ -38,25 +38,25 @@ def network_resnet(octree, flags, training=True, reuse=None): def network_ocnn(octree, flags, training=True, reuse=None): depth = flags.depth channels = [512, 256, 128, 64, 32, 16, 8, 4, 2] - with tf.variable_scope("ocnn", reuse=reuse): + with tf.compat.v1.variable_scope("ocnn", reuse=reuse): data = octree_property(octree, property_name="feature", dtype=tf.float32, depth=depth, channel=flags.channel) data = tf.reshape(data, [1, flags.channel, -1, 1]) for d in range(depth, 2, -1): - with tf.variable_scope('depth_%d' % d): + with tf.compat.v1.variable_scope('depth_%d' % d): data = octree_conv_bn_relu(data, octree, d, channels[d], training) data, _ = octree_max_pool(data, octree, d) - with tf.variable_scope("full_voxel"): + with tf.compat.v1.variable_scope("full_voxel"): data = 
octree_full_voxel(data, depth=2) - data = tf.layers.dropout(data, rate=0.5, training=training) + data = tf.compat.v1.layers.dropout(data, rate=0.5, training=training) - with tf.variable_scope("fc1"): + with tf.compat.v1.variable_scope("fc1"): data = fc_bn_relu(data, channels[2], training=training) - data = tf.layers.dropout(data, rate=0.5, training=training) + data = tf.compat.v1.layers.dropout(data, rate=0.5, training=training) - with tf.variable_scope("fc2"): + with tf.compat.v1.variable_scope("fc2"): logit = dense(data, flags.nout, use_bias=True) return logit diff --git a/tensorflow/script/network_completion.py b/tensorflow/script/network_completion.py index ae1a9a35..a77a780f 100644 --- a/tensorflow/script/network_completion.py +++ b/tensorflow/script/network_completion.py @@ -27,22 +27,22 @@ def octree_encoder(self, octree, training, reuse=False): flags, channels = self.flags, self.channels depth, convd = flags.depth, [None] * 10 - with tf.variable_scope('ocnn_encoder', reuse=reuse): - with tf.variable_scope('signal_gt'): + with tf.compat.v1.variable_scope('ocnn_encoder', reuse=reuse): + with tf.compat.v1.variable_scope('signal_gt'): data = get_input_signal(octree, depth, flags.channel) - with tf.variable_scope("front_%d" % depth): + with tf.compat.v1.variable_scope("front_%d" % depth): convd[depth] = octree_conv_bn_relu( data, octree, depth, channels[depth], training) for d in range(depth, 1, -1): for i in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d_%d' % (d, i)): + with tf.compat.v1.variable_scope('resblock_%d_%d' % (d, i)): convd[d] = octree_resblock( convd[d], octree, d, channels[d], 1, training) if d > 2: - with tf.variable_scope('down_%d' % d): + with tf.compat.v1.variable_scope('down_%d' % d): convd[d-1] = octree_conv_bn_relu( convd[d], octree, d, channels[d-1], training, stride=2, kernel_size=[2]) @@ -53,10 +53,10 @@ def octree_decoder(self, convd, octree0, octree1, training, reuse=False): depth, deconv = flags.depth, convd[2] 
loss, accu = [], [] - with tf.variable_scope('ocnn_decoder', reuse=reuse): + with tf.compat.v1.variable_scope('ocnn_decoder', reuse=reuse): for d in range(2, depth + 1): if d > 2: - with tf.variable_scope('up_%d' % d): + with tf.compat.v1.variable_scope('up_%d' % d): deconv = octree_deconv_bn_relu( deconv, octree1, d-1, channels[d], training, stride=2, kernel_size=[2]) @@ -65,27 +65,27 @@ def octree_decoder(self, convd, octree0, octree1, training, reuse=False): deconv = deconv + skip for i in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d_%d' % (d, i)): + with tf.compat.v1.variable_scope('resblock_%d_%d' % (d, i)): deconv = octree_resblock( deconv, octree1, d, channels[d], 1, training) - with tf.variable_scope('predict_%d' % d): + with tf.compat.v1.variable_scope('predict_%d' % d): logit, label = predict_label(deconv, 2, 32, training) # (1, C, H, 1) -> (H, C) - logit = tf.transpose(tf.squeeze(logit, [0, 3])) + logit = tf.transpose(a=tf.squeeze(logit, [0, 3])) - with tf.variable_scope('loss_%d' % d): - with tf.variable_scope('label_gt'): + with tf.compat.v1.variable_scope('loss_%d' % d): + with tf.compat.v1.variable_scope('label_gt'): label_gt = get_split_label(octree1, d) loss.append(softmax_loss(logit, label_gt, num_class=2)) accu.append(label_accuracy(label, label_gt)) if d == depth: - with tf.variable_scope('regress_%d' % d): + with tf.compat.v1.variable_scope('regress_%d' % d): signal = predict_signal(deconv, flags.channel, 32, training) - with tf.variable_scope('loss_regress'): - with tf.variable_scope('signal_gt'): + with tf.compat.v1.variable_scope('loss_regress'): + with tf.compat.v1.variable_scope('signal_gt'): signal_gt = get_input_signal(octree1, depth, flags.channel) loss.append(regress_loss(signal, signal_gt)) return loss, accu @@ -94,19 +94,19 @@ def decode_shape(self, convd, octree0, training, reuse=False): flags, channels = self.flags, self.channels depth, deconv = flags.depth, convd[2] - with tf.variable_scope('ocnn_decoder', 
reuse=reuse): + with tf.compat.v1.variable_scope('ocnn_decoder', reuse=reuse): # init the octree - with tf.variable_scope('octree_0'): + with tf.compat.v1.variable_scope('octree_0'): dis = False if flags.channel < 4 else True octree = octree_new(1, channel=flags.channel, has_displace=dis) - with tf.variable_scope('octree_1'): + with tf.compat.v1.variable_scope('octree_1'): octree = octree_grow(octree, target_depth=1, full_octree=True) - with tf.variable_scope('octree_2'): + with tf.compat.v1.variable_scope('octree_2'): octree = octree_grow(octree, target_depth=2, full_octree=True) for d in range(2, depth + 1): if d > 2: - with tf.variable_scope('up_%d' % d): + with tf.compat.v1.variable_scope('up_%d' % d): deconv = octree_deconv_bn_relu( deconv, octree, d-1, channels[d], training, stride=2, kernel_size=[2]) @@ -115,24 +115,24 @@ def decode_shape(self, convd, octree0, training, reuse=False): deconv = deconv + skip for i in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d_%d' % (d, i)): + with tf.compat.v1.variable_scope('resblock_%d_%d' % (d, i)): deconv = octree_resblock( deconv, octree, d, channels[d], 1, training) - with tf.variable_scope('predict_%d' % d): + with tf.compat.v1.variable_scope('predict_%d' % d): _, label = predict_label(deconv, 2, 32, training) - with tf.variable_scope('octree_%d' % d): + with tf.compat.v1.variable_scope('octree_%d' % d): octree = octree_update(octree, label, depth=d, mask=1) if d < depth: - with tf.variable_scope('octree_%d' % (d+1)): + with tf.compat.v1.variable_scope('octree_%d' % (d+1)): octree = octree_grow(octree, target_depth=d+1, full_octree=False) else: - with tf.variable_scope('regress_%d' % d): + with tf.compat.v1.variable_scope('regress_%d' % d): signal = predict_signal(deconv, flags.channel, 32, training) signal = normalize_signal(signal) signal = octree_mask(signal, label, mask=0) - with tf.variable_scope('octree_%d' % d): + with tf.compat.v1.variable_scope('octree_%d' % d): octree = 
octree_set_property( octree, signal, property_name="feature", depth=depth) return octree diff --git a/tensorflow/script/network_factory.py b/tensorflow/script/network_factory.py index 87500a00..c25498fd 100644 --- a/tensorflow/script/network_factory.py +++ b/tensorflow/script/network_factory.py @@ -24,4 +24,3 @@ def seg_network(octree, flags, training, reuse=False, pts=None, mask=None): else: print('Error, no network: ' + flags.name) - \ No newline at end of file diff --git a/tensorflow/script/network_hrnet.py b/tensorflow/script/network_hrnet.py index 983babb9..aa3b6274 100644 --- a/tensorflow/script/network_hrnet.py +++ b/tensorflow/script/network_hrnet.py @@ -16,7 +16,7 @@ def __call__(self, data, octree, d, mask=None): def branch(data, octree, depth, channel, block_num, training): if depth > 5: block_num = block_num // 2 # !!! whether should we add this !!! for i in range(block_num): - with tf.variable_scope('resblock_d%d_%d' % (depth, i)): + with tf.compat.v1.variable_scope('resblock_d%d_%d' % (depth, i)): # data = octree_resblock2(data, octree, depth, channel, training) bottleneck = 4 if channel < 256 else 8 data = octree_resblock(data, octree, depth, channel, 1, training, bottleneck) @@ -24,7 +24,7 @@ def branch(data, octree, depth, channel, block_num, training): def branches(data, octree, depth, channel, block_num, training): for i in range(len(data)): - with tf.variable_scope('branch_%d' % (depth - i)): + with tf.compat.v1.variable_scope('branch_%d' % (depth - i)): depth_i, channel_i = depth - i, (2 ** i) * channel # if channel_i > 256: channel_i = 256 data[i] = branch(data[i], octree, depth_i, channel_i, block_num, training) @@ -36,16 +36,16 @@ def trans_func(data_in, octree, d0, d1, training): channel1 = channel0 * (2 ** (d0 - d1)) # if channel1 > 256: channel1 = 256 ## !!! 
clip the channel to 256 # no relu for the last feature map - with tf.variable_scope('trans_%d_%d' % (d0, d1)): + with tf.compat.v1.variable_scope('trans_%d_%d' % (d0, d1)): if d0 > d1: # downsample for d in range(d0, d1 + 1, -1): - with tf.variable_scope('down_%d' % d): + with tf.compat.v1.variable_scope('down_%d' % d): data = octree_conv_bn_relu(data, octree, d, channel0/4, training, stride=2) - with tf.variable_scope('down_%d' % (d1 + 1)): + with tf.compat.v1.variable_scope('down_%d' % (d1 + 1)): data = octree_conv_bn(data, octree, d1 + 1, channel1, training, stride=2) elif d0 < d1: # upsample for d in range(d0, d1, 1): - with tf.variable_scope('up_%d' % d): + with tf.compat.v1.variable_scope('up_%d' % d): if d == d0: data = octree_conv1x1_bn(data, channel1, training) data = octree_tile(data, octree, d) @@ -59,16 +59,16 @@ def trans_func(data_in, octree, d0, d1, training, upsample): channel1 = channel0 * (2 ** (d0 - d1)) # if channel1 > 256: channel1 = 256 ## !!! clip the channel to 256 # no relu for the last feature map - with tf.variable_scope('trans_%d_%d' % (d0, d1)): + with tf.compat.v1.variable_scope('trans_%d_%d' % (d0, d1)): if d0 > d1: # downsample for d in range(d0, d1, -1): - with tf.variable_scope('down_%d' % d): + with tf.compat.v1.variable_scope('down_%d' % d): data, _ = octree_max_pool(data, octree, d) - with tf.variable_scope('conv1x1_%d' % (d1)): + with tf.compat.v1.variable_scope('conv1x1_%d' % (d1)): data = octree_conv1x1_bn(data, channel1, training) elif d0 < d1: # upsample for d in range(d0, d1, 1): - with tf.variable_scope('up_%d' % d): + with tf.compat.v1.variable_scope('up_%d' % d): if d == d0: data = octree_conv1x1_bn(data, channel1, training) data = OctreeUpsample(upsample)(data, octree, d) @@ -86,7 +86,7 @@ def transitions(data, octree, depth, training, upsample='neareast'): outputs = [None] *(num + 1) for j in range(num + 1): - with tf.variable_scope('fuse_%d' % (depth - j)): + with tf.compat.v1.variable_scope('fuse_%d' % (depth - j)): 
outputs[j] = tf.nn.relu(tf.add_n(features[j])) return outputs @@ -98,42 +98,42 @@ def __init__(self, flags): def network(self, octree, training, mask=None, reuse=False): flags = self.flags - with tf.variable_scope('ocnn_hrnet', reuse=reuse): + with tf.compat.v1.variable_scope('ocnn_hrnet', reuse=reuse): # backbone convs = self.backbone(octree, training) self.tensors['convs'] = convs # header nout_cls, nout_seg = flags.nouts[0], flags.nouts[1] - with tf.variable_scope('seg_header'): + with tf.compat.v1.variable_scope('seg_header'): logit_seg = self.seg_header(convs, octree, nout_seg, mask, training) self.tensors['logit_seg'] = logit_seg - with tf.variable_scope('cls_header'): + with tf.compat.v1.variable_scope('cls_header'): logit_cls = self.cls_header(convs, octree, nout_cls, training) self.tensors['logit_cls'] = logit_cls return self.tensors def network_cls(self, octree, training, reuse=False): - with tf.variable_scope('ocnn_hrnet', reuse=reuse): + with tf.compat.v1.variable_scope('ocnn_hrnet', reuse=reuse): # backbone convs = self.backbone(octree, training) self.tensors['convs'] = convs # header - with tf.variable_scope('cls_header'): + with tf.compat.v1.variable_scope('cls_header'): logit = self.cls_header(convs, octree, self.flags.nout, training) self.tensors['logit_cls'] = logit return logit def network_seg(self, octree, training, reuse=False, pts=None, mask=None): - with tf.variable_scope('ocnn_hrnet', reuse=reuse): + with tf.compat.v1.variable_scope('ocnn_hrnet', reuse=reuse): ## backbone convs = self.backbone(octree, training) self.tensors['convs'] = convs ## header - with tf.variable_scope('seg_header'): + with tf.compat.v1.variable_scope('seg_header'): if pts is None: logit = self.seg_header(convs, octree, self.flags.nout, mask, training) else: @@ -145,20 +145,27 @@ def seg_header(self, inputs, octree, nout, mask, training): feature = self.points_feat(inputs, octree) depth_out, factor = self.flags.depth_out, self.flags.factor + if depth_out == 7: + 
feature = OctreeUpsample()(feature, octree, 5, mask) + conv6 = self.tensors['front/conv6'] # (1, C, H, 1) + feature = tf.concat([feature, conv6], axis=1) + feature = OctreeUpsample()(feature, octree, 6, mask) + conv7 = self.tensors['front/conv7'] # (1, C, H, 1) + feature = tf.concat([feature, conv7], axis=1) if depth_out == 6: - feature = OctreeUpsample('linear')(feature, octree, 5, mask) + feature = OctreeUpsample()(feature, octree, 5, mask) # rp 'upsample='linear'' conv6 = self.tensors['front/conv6'] # (1, C, H, 1) if mask is not None: - conv6 = tf.boolean_mask(conv6, mask, axis=2) + conv6 = tf.boolean_mask(tensor=conv6, mask=mask, axis=2) feature = tf.concat([feature, conv6], axis=1) else: if mask is not None: - feature = tf.boolean_mask(feature, mask, axis=2) + feature = tf.boolean_mask(tensor=feature, mask=mask, axis=2) # feature = octree_conv1x1_bn_relu(feature, 1024, training=training) - with tf.variable_scope('predict_%d' % depth_out): + with tf.compat.v1.variable_scope('predict_%d' % depth_out): logit = predict_module(feature, nout, 128 * factor, training) # 2-FC - logit = tf.transpose(tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) + logit = tf.transpose(a=tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) return logit def seg_header_pts(self, inputs, octree, nout, pts, training): @@ -175,9 +182,9 @@ def seg_header_pts(self, inputs, octree, nout, pts, training): conv6 = octree_nearest_interp(pts6, conv6, octree, depth=6) feature = tf.concat([feature, conv6], axis=1) - with tf.variable_scope('predict_%d' % depth_out): + with tf.compat.v1.variable_scope('predict_%d' % depth_out): logit = predict_module(feature, nout, 128 * factor, training) # 2-FC - logit = tf.transpose(tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) + logit = tf.transpose(a=tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) return logit @@ -186,7 +193,7 @@ def points_feat(self, inputs, octree): depth, factor, num = 5, self.flags.factor, len(inputs) assert(self.flags.depth >= 
depth) for i in range(1, num): - with tf.variable_scope('up_%d' % i): + with tf.compat.v1.variable_scope('up_%d' % i): for j in range(i): d = depth - i + j data[i] = OctreeUpsample(self.flags.upsample)(data[i], octree, d) @@ -201,9 +208,9 @@ def cls_header(self, inputs, octree, nout, training): for i in range(num): conv = data[i] d = depth - i - with tf.variable_scope('down_%d' % d): + with tf.compat.v1.variable_scope('down_%d' % d): for j in range(2 - i): - with tf.variable_scope('down_%d' % (d - j)): + with tf.compat.v1.variable_scope('down_%d' % (d - j)): conv, _ = octree_max_pool(conv, octree, d - j) data[i] = conv @@ -212,15 +219,15 @@ def cls_header(self, inputs, octree, nout, training): # conv = octree_conv1x1_bn_relu(features, 256, training) # with tf.variable_scope("fc1"): # conv = octree_conv1x1_bn_relu(conv, 512 * factor, training) - with tf.variable_scope("fc1"): + with tf.compat.v1.variable_scope("fc1"): conv = octree_conv1x1_bn_relu(features, 512 * factor, training) fc1 = octree_global_pool(conv, octree, depth=3) self.tensors['fc1'] = fc1 if self.flags.dropout[0]: - fc1 = tf.layers.dropout(fc1, rate=0.5, training=training) + fc1 = tf.compat.v1.layers.dropout(fc1, rate=0.5, training=training) - with tf.variable_scope("fc2"): + with tf.compat.v1.variable_scope("fc2"): # with tf.variable_scope('fc2_pre'): # fc1 = fc_bn_relu(fc1, 512, training=training) logit = dense(fc1, nout, use_bias=True) @@ -230,7 +237,7 @@ def cls_header(self, inputs, octree, nout, training): def backbone(self, octree, training): flags = self.flags depth, channel = flags.depth, 64 * flags.factor - with tf.variable_scope('signal'): + with tf.compat.v1.variable_scope('signal'): data = octree_property(octree, property_name='feature', dtype=tf.float32, depth=depth, channel=flags.channel) data = tf.reshape(data, [1, flags.channel, -1, 1]) @@ -244,7 +251,7 @@ def backbone(self, octree, training): # stages stage_num = 3 for stage in range(1, stage_num + 1): - with 
tf.variable_scope('stage_%d' % stage): + with tf.compat.v1.variable_scope('stage_%d' % stage): convs = branches(convs, octree, d1, channel, flags.resblock_num, training) if stage == stage_num: break convs = transitions(convs, octree, depth=d1, training=training, upsample=flags.upsample) @@ -252,14 +259,19 @@ def backbone(self, octree, training): def front_layer(self, data, octree, d0, d1, channel, training): conv = data - with tf.variable_scope('front'): + with tf.compat.v1.variable_scope('front'): for d in range(d0, d1, -1): - with tf.variable_scope('depth_%d' % d): - channeld = channel / 2 ** (d - d1 + 1) + with tf.compat.v1.variable_scope('depth_%d' % d): + channeld = channel / (2 ** (d - d1 + 1)) conv = octree_conv_bn_relu(conv, octree, d, channeld, training) - self.tensors['front/conv6'] = conv # TODO: add a resblock here? + # conv = octree_conv_bn_relu(conv, octree, d, 48, training) + # conv = tf.layers.dropout(conv, rate=0.5, training=training) + for n in range(0, self.flags.resblock_num): + with tf.compat.v1.variable_scope('resblock_%d' % n): + conv = octree_resblock(conv, octree, d, channeld, 1, training) + self.tensors['front/conv'+str(d)] = conv # TODO: add a resblock here? 
conv, _ = octree_max_pool(conv, octree, d) - with tf.variable_scope('depth_%d' % d1): + with tf.compat.v1.variable_scope('depth_%d' % d1): conv = octree_conv_bn_relu(conv, octree, d1, channel, training) self.tensors['front/conv5'] = conv return conv diff --git a/tensorflow/script/network_hrnet4t6.py b/tensorflow/script/network_hrnet4t6.py new file mode 100644 index 00000000..04a15047 --- /dev/null +++ b/tensorflow/script/network_hrnet4t6.py @@ -0,0 +1,270 @@ +import tensorflow as tf +from ocnn import * + +class OctreeUpsample: + def __init__(self, upsample='nearest'): + self.upsample = upsample + + def __call__(self, data, octree, d, mask=None): + if self.upsample == 'nearest': + data = octree_tile(data, octree, d) + else: + data = octree_bilinear(data, octree, d, d + 1, mask) + return data + + +def branch(data, octree, depth, channel, block_num, training): + #rpXX if depth > 5: block_num = block_num // 2 # !!! whether should we add this !!! + for i in range(block_num): + with tf.compat.v1.variable_scope('resblock_d%d_%d' % (depth, i)): + # data = octree_resblock2(data, octree, depth, channel, training) + bottleneck = 4 if channel < 256 else 8 + data = octree_resblock(data, octree, depth, channel, 1, training, bottleneck) + return data + +def branches(data, octree, depth, channel, block_num, training): + for i in range(len(data)): + with tf.compat.v1.variable_scope('branch_%d' % (depth - i)): + depth_i, channel_i = depth - i, (2 ** i) * channel + # if channel_i > 256: channel_i = 256 + data[i] = branch(data[i], octree, depth_i, channel_i, block_num, training) + return data + +def trans_func(data_in, octree, d0, d1, training): + data = data_in + channel0 = int(data.shape[1]) + channel1 = channel0 * (2 ** (d0 - d1)) + # if channel1 > 256: channel1 = 256 ## !!! 
clip the channel to 256 + # no relu for the last feature map + with tf.compat.v1.variable_scope('trans_%d_%d' % (d0, d1)): + if d0 > d1: # downsample + for d in range(d0, d1 + 1, -1): + with tf.compat.v1.variable_scope('down_%d' % d): + data = octree_conv_bn_relu(data, octree, d, channel0/4, training, stride=2) + with tf.compat.v1.variable_scope('down_%d' % (d1 + 1)): + data = octree_conv_bn(data, octree, d1 + 1, channel1, training, stride=2) + elif d0 < d1: # upsample + for d in range(d0, d1, 1): + with tf.compat.v1.variable_scope('up_%d' % d): + if d == d0: + data = octree_conv1x1_bn(data, channel1, training) + data = octree_tile(data, octree, d) + else: # do nothing + pass + return data + +def trans_func(data_in, octree, d0, d1, training, upsample): + data = data_in + channel0 = int(data.shape[1]) + channel1 = channel0 * (2 ** (d0 - d1)) + # if channel1 > 256: channel1 = 256 ## !!! clip the channel to 256 + # no relu for the last feature map + with tf.compat.v1.variable_scope('trans_%d_%d' % (d0, d1)): + if d0 > d1: # downsample + for d in range(d0, d1, -1): + with tf.compat.v1.variable_scope('down_%d' % d): + data, _ = octree_max_pool(data, octree, d) + with tf.compat.v1.variable_scope('conv1x1_%d' % (d1)): + data = octree_conv1x1_bn(data, channel1, training) + elif d0 < d1: # upsample + for d in range(d0, d1, 1): + with tf.compat.v1.variable_scope('up_%d' % d): + if d == d0: + data = octree_conv1x1_bn(data, channel1, training) + data = OctreeUpsample(upsample)(data, octree, d) + else: # do nothing + pass + return data + +def transitions(data, octree, depth, training, upsample='neareast'): + num = len(data) + features = [[0]*num for i in range(num + 1)] + for i in range(num): + for j in range(num + 1): + d0, d1 = depth - i, depth - j + features[j][i] = trans_func(data[i], octree, d0, d1, training, upsample) + + outputs = [None] *(num + 1) + for j in range(num + 1): + with tf.compat.v1.variable_scope('fuse_%d' % (depth - j)): + outputs[j] = 
tf.nn.relu(tf.add_n(features[j])) + return outputs + + +class HRNet: + def __init__(self, flags): + self.tensors = dict() + self.flags = flags + + def network(self, octree, training, mask=None, reuse=False): + flags = self.flags + with tf.compat.v1.variable_scope('ocnn_hrnet', reuse=reuse): + # backbone + convs = self.backbone(octree, training) + self.tensors['convs'] = convs + + # header + nout_cls, nout_seg = flags.nouts[0], flags.nouts[1] + with tf.compat.v1.variable_scope('seg_header'): + logit_seg = self.seg_header(convs, octree, nout_seg, mask, training) + self.tensors['logit_seg'] = logit_seg + + with tf.compat.v1.variable_scope('cls_header'): + logit_cls = self.cls_header(convs, octree, nout_cls, training) + self.tensors['logit_cls'] = logit_cls + return self.tensors + + def network_cls(self, octree, training, reuse=False): + with tf.compat.v1.variable_scope('ocnn_hrnet', reuse=reuse): + # backbone + convs = self.backbone(octree, training) + self.tensors['convs'] = convs + + # header + with tf.compat.v1.variable_scope('cls_header'): + logit = self.cls_header(convs, octree, self.flags.nout, training) + self.tensors['logit_cls'] = logit + return logit + + def network_seg(self, octree, training, reuse=False, pts=None, mask=None): + with tf.compat.v1.variable_scope('ocnn_hrnet', reuse=reuse): + ## backbone + convs = self.backbone(octree, training) + self.tensors['convs'] = convs + + ## header + with tf.compat.v1.variable_scope('seg_header'): + if pts is None: + logit = self.seg_header(convs, octree, self.flags.nout, mask, training) + else: + logit = self.seg_header_pts(convs, octree, self.flags.nout, pts, training) + self.tensors['logit_seg'] = logit + return logit + + def seg_header(self, inputs, octree, nout, mask, training): + feature = self.points_feat(inputs, octree) + + depth_out, factor = self.flags.depth_out, self.flags.factor + # if depth_out == 7: + # feature = OctreeUpsample()(feature, octree, 5, mask) + # conv6 = self.tensors['front/conv6'] # (1, C, 
H, 1) + # feature = tf.concat([feature, conv6], axis=1) + # feature = OctreeUpsample()(feature, octree, 6, mask) + # conv7 = self.tensors['front/conv7'] # (1, C, H, 1) + # feature = tf.concat([feature, conv7], axis=1) + if depth_out == 7: + feature = OctreeUpsample()(feature, octree, 6, mask) # rp 'linear' + conv7 = self.tensors['front/conv7'] # (1, C, H, 1) + feature = tf.concat([feature, conv7], axis=1) + else: + if mask is not None: + feature = tf.boolean_mask(tensor=feature, mask=mask, axis=2) + + # feature = octree_conv1x1_bn_relu(feature, 1024, training=training) + with tf.compat.v1.variable_scope('predict_%d' % depth_out): + logit = predict_module(feature, nout, 128 * factor, training) # 2-FC + logit = tf.transpose(a=tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) + return logit + + def seg_header_pts(self, inputs, octree, nout, pts, training): + feature = self.points_feat(inputs, octree) # The resolution is 5-depth + + depth_out, factor = self.flags.depth_out, self.flags.factor + xyz, ids = tf.split(pts, [3, 1], axis=1) + xyz = xyz + 1.0 # [0, 2] + pts5 = tf.concat([xyz * 16.0, ids], axis=1) # [0, 32] + feature = octree_bilinear_v3(pts5, feature, octree, depth=5) + if depth_out == 6: + conv6 = self.tensors['front/conv6'] # The resolution is 6-depth + pts6 = tf.concat([xyz * 32.0, ids], axis=1) # [0, 64] + conv6 = octree_nearest_interp(pts6, conv6, octree, depth=6) + feature = tf.concat([feature, conv6], axis=1) + + with tf.compat.v1.variable_scope('predict_%d' % depth_out): + logit = predict_module(feature, nout, 128 * factor, training) # 2-FC + logit = tf.transpose(a=tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) + return logit + + + def points_feat(self, inputs, octree): + data = [t for t in inputs] + depth, factor, num = 5, self.flags.factor, len(inputs) + assert(self.flags.depth >= depth) + for i in range(1, num): + with tf.compat.v1.variable_scope('up_%d' % i): + for j in range(i): + d = depth - i + j + data[i] = 
OctreeUpsample(self.flags.upsample)(data[i], octree, d) + feature = tf.concat(data, axis=1) # the resolution is depth-5 + return feature + + def cls_header(self, inputs, octree, nout, training): + data = [t for t in inputs] + channel = [int(t.shape[1]) for t in inputs] + depth, factor, num = 5, self.flags.factor, len(inputs) + assert(self.flags.depth >= depth) + for i in range(num): + conv = data[i] + d = depth - i + with tf.compat.v1.variable_scope('down_%d' % d): + for j in range(2 - i): + with tf.compat.v1.variable_scope('down_%d' % (d - j)): + conv, _ = octree_max_pool(conv, octree, d - j) + data[i] = conv + + features = tf.concat(data, axis=1) + # with tf.variable_scope("fc0"): + # conv = octree_conv1x1_bn_relu(features, 256, training) + # with tf.variable_scope("fc1"): + # conv = octree_conv1x1_bn_relu(conv, 512 * factor, training) + with tf.compat.v1.variable_scope("fc1"): + conv = octree_conv1x1_bn_relu(features, 512 * factor, training) + + fc1 = octree_global_pool(conv, octree, depth=3) + self.tensors['fc1'] = fc1 + if self.flags.dropout[0]: + fc1 = tf.compat.v1.layers.dropout(fc1, rate=0.5, training=training) + + with tf.compat.v1.variable_scope("fc2"): + # with tf.variable_scope('fc2_pre'): + # fc1 = fc_bn_relu(fc1, 512, training=training) + logit = dense(fc1, nout, use_bias=True) + self.tensors['fc2'] = logit + return logit + + def backbone(self, octree, training): + flags = self.flags + depth, channel = flags.depth, 64 * flags.factor + with tf.compat.v1.variable_scope('signal'): + data = octree_property(octree, property_name='feature', dtype=tf.float32, + depth=depth, channel=flags.channel) + data = tf.reshape(data, [1, flags.channel, -1, 1]) + if flags.signal_abs: data = tf.abs(data) + + # front + convs = [None] + channel, d1 = 64 * flags.factor, 6 #rpXX + convs[0] = self.front_layer(data, octree, depth, d1, channel, training) + + # stages + stage_num = 3 + for stage in range(1, stage_num + 1): + with tf.compat.v1.variable_scope('stage_%d' % stage): + 
convs = branches(convs, octree, d1, channel, flags.resblock_num, training) + if stage == stage_num: break + convs = transitions(convs, octree, depth=d1, training=training, upsample=flags.upsample) + return convs + + def front_layer(self, data, octree, d0, d1, channel, training): + conv = data + with tf.compat.v1.variable_scope('front'): + for d in range(d0, d1, -1): + with tf.compat.v1.variable_scope('depth_%d' % d): + channeld = channel / (2 ** (d - d1 + 1)) + conv = octree_conv_bn_relu(conv, octree, d, channeld, training) + self.tensors['front/conv'+str(d)] = conv # TODO: add a resblock here? + conv, _ = octree_max_pool(conv, octree, d) + with tf.compat.v1.variable_scope('depth_%d' % d1): + conv = octree_conv_bn_relu(conv, octree, d1, channel, training) + self.tensors['front/conv'+str(d1)] = conv + return conv diff --git a/tensorflow/script/network_unet.py b/tensorflow/script/network_unet.py index afe22014..837bf639 100644 --- a/tensorflow/script/network_unet.py +++ b/tensorflow/script/network_unet.py @@ -4,9 +4,16 @@ def network_unet(octree, flags, training, reuse=False): depth = flags.depth - nout = [512, 256, 256, 256, 256, 128, 64, 32, 16, 16, 16] - with tf.variable_scope('ocnn_unet', reuse=reuse): - with tf.variable_scope('signal'): + #nout = [512, 256, 256, 256, 256, 128, 64, 32, 16, 16, 16] + #nout = [512, 256, 512, 256, 256, 128, 64, 32, 16, 16, 16] + if depth==7: + nout = [0,0, 512, 256, 128, 128, 64, 32, 0, 0, 0] + else: + #nout = [ 0, 0, 512, 256, 128, 64, 16, 0, 0, 0, 0] + nout = [ 0, 0, 256, 256, 128, 128, 64, 0, 0, 0, 0] + + with tf.compat.v1.variable_scope('ocnn_unet', reuse=reuse): + with tf.compat.v1.variable_scope('signal'): data = octree_property(octree, property_name='feature', dtype=tf.float32, depth=depth, channel=flags.channel) data = tf.reshape(data, [1, flags.channel, -1, 1]) @@ -15,7 +22,7 @@ def network_unet(octree, flags, training, reuse=False): convd = [None]*10 convd[depth+1] = data for d in range(depth, 1, -1): - with 
tf.variable_scope('encoder_d%d' % d): + with tf.compat.v1.variable_scope('encoder_d%d' % d): # downsampling dd = d if d == depth else d + 1 stride = 1 if d == depth else 2 @@ -24,29 +31,35 @@ def network_unet(octree, flags, training, reuse=False): stride=stride, kernel_size=kernel_size) # resblock for n in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d' % n): + with tf.compat.v1.variable_scope('resblock_%d' % n): convd[d] = octree_resblock(convd[d], octree, d, nout[d], 1, training) + convd[2] = tf.compat.v1.layers.dropout(convd[2], rate=0.5, training=training) ## decoder deconv = convd[2] for d in range(3, depth + 1): - with tf.variable_scope('decoder_d%d' % d): - # upsampling + with tf.compat.v1.variable_scope('decoder_d%d' % d): + # upsampling # deconv = octree_tile(deconv, d-1, d, octree=octree1) # deconv = octree_upsample(deconv, octree, d-1, nout[d], training) deconv = octree_deconv_bn_relu(deconv, octree, d-1, nout[d], training, kernel_size=[2], stride=2, fast_mode=False) - deconv = convd[d] + deconv # skip connections + if depth!=7: + deconv = convd[d] + deconv # skip connections + elif d % 2==0: + deconv = convd[d] + deconv # skip connections + # if d==6: + # deconv = tf.layers.dropout(deconv, rate=0.5, training=training) # resblock for n in range(0, flags.resblock_num): - with tf.variable_scope('resblock_%d' % n): + with tf.compat.v1.variable_scope('resblock_%d' % n): deconv = octree_resblock(deconv, octree, d, nout[d], 1, training) # segmentation if d == depth: - with tf.variable_scope('predict_label'): + with tf.compat.v1.variable_scope('predict_label'): logit = predict_module(deconv, flags.nout, 64, training) - logit = tf.transpose(tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) + logit = tf.transpose(a=tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C) return logit \ No newline at end of file diff --git a/tensorflow/script/ocnn.py b/tensorflow/script/ocnn.py index b8067220..3494506f 100644 --- a/tensorflow/script/ocnn.py +++ 
b/tensorflow/script/ocnn.py @@ -8,7 +8,7 @@ def get_variables_with_name(name=None, without=None, train_only=True, verbose=Fa if name is None: raise Exception("please input a name") - t_vars = tf.trainable_variables() if train_only else tf.all_variables() + t_vars = tf.compat.v1.trainable_variables() if train_only else tf.compat.v1.all_variables() d_vars = [var for var in t_vars if name in var.name] if without is not None: @@ -23,14 +23,14 @@ def get_variables_with_name(name=None, without=None, train_only=True, verbose=Fa def dense(inputs, nout, use_bias=False): - inputs = tf.layers.flatten(inputs) - fc = tf.layers.dense(inputs, nout, use_bias=use_bias, - kernel_initializer=tf.contrib.layers.xavier_initializer()) + inputs = tf.compat.v1.layers.flatten(inputs) + fc = tf.compat.v1.layers.dense(inputs, nout, use_bias=use_bias, + kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) return fc def batch_norm(inputs, training, axis=1): - return tf.layers.batch_normalization(inputs, axis=axis, training=training) + return tf.compat.v1.layers.batch_normalization(inputs, axis=axis, training=training) def fc_bn_relu(inputs, nout, training): @@ -40,27 +40,27 @@ def fc_bn_relu(inputs, nout, training): def conv2d(inputs, nout, kernel_size, stride, padding='SAME', data_format='channels_first'): - return tf.layers.conv2d(inputs, nout, kernel_size=kernel_size, strides=stride, + return tf.compat.v1.layers.conv2d(inputs, nout, kernel_size=kernel_size, strides=stride, padding=padding, data_format=data_format, use_bias=False, - kernel_initializer=tf.contrib.layers.xavier_initializer()) + kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) def octree_conv1x1(inputs, nout, use_bias=False): - outputs = tf.layers.conv2d(inputs, nout, kernel_size=1, strides=1, + outputs = tf.compat.v1.layers.conv2d(inputs, nout, kernel_size=1, strides=1, 
data_format='channels_first', use_bias=use_bias, - kernel_initializer=tf.contrib.layers.xavier_initializer()) + kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) return outputs def octree_conv1x1(inputs, nout, use_bias=False): - with tf.variable_scope('conv2d_1x1'): + with tf.compat.v1.variable_scope('conv2d_1x1'): inputs = tf.squeeze(inputs, axis=[0, 3]) # (1, C, H, 1) -> (C, H) - weights = tf.get_variable('weights', shape=[nout, int(inputs.shape[0])], - dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()) + weights = tf.compat.v1.get_variable('weights', shape=[int(nout), int(inputs.shape[0])], + dtype=tf.float32, initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) outputs = tf.matmul(weights, inputs) # (C, H) -> (nout, H) if use_bias: - bias = tf.get_variable('bias', shape=[nout, 1], dtype=tf.float32, - initializer=tf.contrib.layers.xavier_initializer()) + bias = tf.compat.v1.get_variable('bias', shape=[int(nout), 1], dtype=tf.float32, + initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) outputs = bias + outputs outputs = tf.expand_dims(tf.expand_dims(outputs, axis=0), axis=-1) return outputs @@ -87,34 +87,34 @@ def conv2d_bn_relu(inputs, nout, kernel_size, stride, training): def upsample(data, channel, training): - deconv = tf.layers.conv2d_transpose(data, channel, kernel_size=[8, 1], + deconv = tf.compat.v1.layers.conv2d_transpose(data, channel, kernel_size=[8, 1], strides=[8, 1], data_format='channels_first', use_bias=False, - kernel_initializer=tf.contrib.layers.xavier_initializer()) - bn = tf.layers.batch_normalization(deconv, axis=1, training=training) + kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) + bn = tf.compat.v1.layers.batch_normalization(deconv, axis=1, training=training) 
return tf.nn.relu(bn) def downsample(data, channel, training): - deconv = tf.layers.conv2d(data, channel, kernel_size=[8, 1], + deconv = tf.compat.v1.layers.conv2d(data, channel, kernel_size=[8, 1], strides=[8, 1], data_format='channels_first', use_bias=False, - kernel_initializer=tf.contrib.layers.xavier_initializer()) - bn = tf.layers.batch_normalization(deconv, axis=1, training=training) + kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")) + bn = tf.compat.v1.layers.batch_normalization(deconv, axis=1, training=training) return tf.nn.relu(bn) def avg_pool2d(inputs, data_format='NCHW'): - return tf.nn.avg_pool2d(inputs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], + return tf.nn.avg_pool2d(input=inputs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', data_format=data_format) def global_pool(inputs, data_format='channels_first'): axis = [2, 3] if data_format == 'channels_first' else [1, 2] - return tf.reduce_mean(inputs, axis=axis) + return tf.reduce_mean(input_tensor=inputs, axis=axis) # !!! 
Deprecated def octree_upsample(data, octree, depth, channel, training): - with tf.variable_scope('octree_upsample'): + with tf.compat.v1.variable_scope('octree_upsample'): depad = octree_depad(data, octree, depth) up = upsample(depad, channel, training) return up @@ -127,7 +127,7 @@ def octree_upsample(data, octree, depth, channel, training): def octree_downsample(data, octree, depth, channel, training): - with tf.variable_scope('octree_downsample'): + with tf.compat.v1.variable_scope('octree_downsample'): down = downsample(data, channel, training) pad = octree_pad(down, octree, depth) return pad @@ -140,12 +140,12 @@ def octree_conv_bn(data, octree, depth, channel, training, kernel_size=[3], else: conv = octree_conv_memory( data, octree, depth, channel, kernel_size, stride) - return tf.layers.batch_normalization(conv, axis=1, training=training) + return tf.compat.v1.layers.batch_normalization(conv, axis=1, training=training) def octree_conv_bn_relu(data, octree, depth, channel, training, kernel_size=[3], stride=1, fast_mode=False): - with tf.variable_scope('conv_bn_relu'): + with tf.compat.v1.variable_scope('conv_bn_relu'): conv_bn = octree_conv_bn(data, octree, depth, channel, training, kernel_size, stride, fast_mode) rl = tf.nn.relu(conv_bn) @@ -165,12 +165,12 @@ def octree_deconv_bn(data, octree, depth, channel, training, kernel_size=[3], else: conv = octree_deconv_memory( data, octree, depth, channel, kernel_size, stride) - return tf.layers.batch_normalization(conv, axis=1, training=training) + return tf.compat.v1.layers.batch_normalization(conv, axis=1, training=training) def octree_deconv_bn_relu(data, octree, depth, channel, training, kernel_size=[3], stride=1, fast_mode=False): - with tf.variable_scope('deconv_bn_relu'): + with tf.compat.v1.variable_scope('deconv_bn_relu'): conv_bn = octree_deconv_bn(data, octree, depth, channel, training, kernel_size, stride, fast_mode) rl = tf.nn.relu(conv_bn) @@ -184,18 +184,18 @@ def octree_resblock(data, octree, 
depth, num_out, stride, training, bottleneck=4 data, mask = octree_max_pool(data, octree, depth=depth) depth = depth - 1 - with tf.variable_scope("1x1x1_a"): + with tf.compat.v1.variable_scope("1x1x1_a"): block1 = octree_conv1x1_bn_relu(data, channelb, training=training) - with tf.variable_scope("3x3x3"): + with tf.compat.v1.variable_scope("3x3x3"): block2 = octree_conv_bn_relu(block1, octree, depth, channelb, training) - with tf.variable_scope("1x1x1_b"): + with tf.compat.v1.variable_scope("1x1x1_b"): block3 = octree_conv1x1_bn(block2, num_out, training=training) block4 = data if num_in != num_out: - with tf.variable_scope("1x1x1_c"): + with tf.compat.v1.variable_scope("1x1x1_c"): block4 = octree_conv1x1_bn(data, num_out, training=training) return tf.nn.relu(block3 + block4) @@ -203,14 +203,14 @@ def octree_resblock(data, octree, depth, num_out, stride, training, bottleneck=4 def octree_resblock2(data, octree, depth, num_out, training): num_in = int(data.shape[1]) - with tf.variable_scope("conv_1"): + with tf.compat.v1.variable_scope("conv_1"): conv = octree_conv_bn_relu(data, octree, depth, num_out/4, training) - with tf.variable_scope("conv_2"): + with tf.compat.v1.variable_scope("conv_2"): conv = octree_conv_bn(conv, octree, depth, num_out, training) link = data if num_in != num_out: - with tf.variable_scope("conv_1x1"): + with tf.compat.v1.variable_scope("conv_1x1"): link = octree_conv1x1_bn(data, num_out, training=training) out = tf.nn.relu(conv + link) @@ -219,9 +219,9 @@ def octree_resblock2(data, octree, depth, num_out, training): def predict_module(data, num_output, num_hidden, training): # MLP with one hidden layer - with tf.variable_scope('conv1'): + with tf.compat.v1.variable_scope('conv1'): conv = octree_conv1x1_bn_relu(data, num_hidden, training) - with tf.variable_scope('conv2'): + with tf.compat.v1.variable_scope('conv2'): logit = octree_conv1x1(conv, num_output, use_bias=True) return logit @@ -229,7 +229,7 @@ def predict_module(data, num_output, 
num_hidden, training): def predict_label(data, num_output, num_hidden, training): logit = predict_module(data, num_output, num_hidden, training) # prob = tf.nn.softmax(logit, axis=1) # logit (1, num_output, ?, 1) - label = tf.argmax(logit, axis=1, output_type=tf.int32) # predict (1, ?, 1) + label = tf.argmax(input=logit, axis=1, output_type=tf.int32) # predict (1, ?, 1) label = tf.reshape(label, [-1]) # flatten return logit, label @@ -238,17 +238,26 @@ def predict_signal(data, num_output, num_hidden, training): return tf.nn.tanh(predict_module(data, num_output, num_hidden, training)) -def softmax_loss(logit, label_gt, num_class, label_smoothing=0.0): - with tf.name_scope('softmax_loss'): +def softmax_loss(logit, label_gt, num_class, label_smoothing=0.0,infoGain=False): + with tf.compat.v1.name_scope('softmax_loss'): label_gt = tf.cast(label_gt, tf.int32) - onehot = tf.one_hot(label_gt, depth=num_class) - loss = tf.losses.softmax_cross_entropy( - onehot, logit, label_smoothing=label_smoothing) + + if infoGain : + # specify some class weightings + # specify the weights for each sample in the batch (without having to compute the onehot label matrix) + class_weights = tf.constant([0.012,0.084,0.047,0.000,0.038,0.095,0.017,0.019,0.205,0.247,0.102,0.067,0.069,0.000]) + weights = tf.gather(class_weights, label_gt) + # compute the loss + #weights=tf.Print(weights,[tf.shape(weights)],"step 2",summarize=100,first_n=10) + loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(label_gt, logit, weights) + else: + onehot = tf.one_hot(label_gt, depth=num_class) + loss = tf.compat.v1.losses.softmax_cross_entropy( onehot, logit, label_smoothing=label_smoothing) return loss def l2_regularizer(name, weight_decay): - with tf.name_scope('l2_regularizer'): + with tf.compat.v1.name_scope('l2_regularizer'): var = get_variables_with_name(name) regularizer = tf.add_n([tf.nn.l2_loss(v) for v in var]) * weight_decay return regularizer @@ -256,25 +265,25 @@ def l2_regularizer(name, 
weight_decay): def label_accuracy(label, label_gt): label_gt = tf.cast(label_gt, tf.int32) - accuracy = tf.reduce_mean(tf.to_float(tf.equal(label, label_gt))) + accuracy = tf.reduce_mean(input_tensor=tf.cast(tf.equal(label, label_gt), dtype=tf.float32)) return accuracy def softmax_accuracy(logit, label): - with tf.name_scope('softmax_accuracy'): - predict = tf.argmax(logit, axis=1, output_type=tf.int32) + with tf.compat.v1.name_scope('softmax_accuracy'): + predict = tf.argmax(input=logit, axis=1, output_type=tf.int32) accu = label_accuracy(predict, tf.cast(label, tf.int32)) return accu def regress_loss(signal, signal_gt): - return tf.reduce_mean(tf.reduce_sum(tf.square(signal-signal_gt), 1)) + return tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=tf.square(signal-signal_gt), axis=1)) def normalize_signal(data): channel = data.shape[1] assert(channel == 3 or channel == 4) - with tf.variable_scope("normalize"): + with tf.compat.v1.variable_scope("normalize"): if channel == 4: normals = tf.slice(data, [0, 0, 0, 0], [1, 3, -1, 1]) displacement = tf.slice(data, [0, 3, 0, 0], [1, 1, -1, 1]) @@ -287,22 +296,22 @@ def normalize_signal(data): def average_tensors(tower_tensors): avg_tensors = [] - with tf.name_scope('avg_tensors'): + with tf.compat.v1.name_scope('avg_tensors'): for tensors in tower_tensors: tensors = [tf.expand_dims(tensor, 0) for tensor in tensors] avg_tensor = tf.concat(tensors, axis=0) - avg_tensor = tf.reduce_mean(avg_tensor, 0) + avg_tensor = tf.reduce_mean(input_tensor=avg_tensor, axis=0) avg_tensors.append(avg_tensor) return avg_tensors def solver_single_gpu(total_loss, learning_rate_handle, gpu_num=1): - with tf.variable_scope('solver'): - update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + with tf.compat.v1.variable_scope('solver'): + update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): global_step = tf.Variable(0, trainable=False, name='global_step') lr = 
learning_rate_handle(global_step) - solver = tf.train.MomentumOptimizer(lr, 0.9) \ + solver = tf.compat.v1.train.MomentumOptimizer(lr, 0.9) \ .minimize(total_loss, global_step=global_step) return solver, lr @@ -310,25 +319,25 @@ def solver_single_gpu(total_loss, learning_rate_handle, gpu_num=1): def solver_multiple_gpus(total_loss, learning_rate_handle, gpu_num): tower_grads, variables = [], [] with tf.device('/cpu:0'): - with tf.variable_scope('solver'): + with tf.compat.v1.variable_scope('solver'): global_step = tf.Variable(0, trainable=False, name='global_step') lr = learning_rate_handle(global_step) - opt = tf.train.MomentumOptimizer(lr, 0.9) + opt = tf.compat.v1.train.MomentumOptimizer(lr, 0.9) for i in range(gpu_num): with tf.device('/gpu:%d' % i): - with tf.name_scope('device_b%d' % i): + with tf.compat.v1.name_scope('device_b%d' % i): grads_and_vars = opt.compute_gradients(total_loss[i]) grads, variables = zip(*grads_and_vars) tower_grads.append(grads) - update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) # !!! Only get the update_ops defined on `device_0` to avoid the sync # between different GPUs to speed up the training process. !!! 
update_ops = [op for op in update_ops if 'device_0' in op.name] assert update_ops, 'The update ops of BN are empty, check the namescope \'device_0\'' with tf.device('/cpu:0'): - with tf.name_scope('sync_and_apply_grad'): + with tf.compat.v1.name_scope('sync_and_apply_grad'): with tf.control_dependencies(update_ops): tower_grads = list(zip(*tower_grads)) avg_grads = average_tensors(tower_grads) @@ -344,47 +353,48 @@ def build_solver(total_loss, learning_rate_handle, gpu_num=1): def summary_train(names, tensors): - with tf.name_scope('summary_train'): + with tf.compat.v1.name_scope('summary_train'): summaries = [] for it in zip(names, tensors): - summaries.append(tf.summary.scalar(it[0], it[1])) - summ = tf.summary.merge(summaries) + summaries.append(tf.compat.v1.summary.scalar(it[0], it[1])) + summ = tf.compat.v1.summary.merge(summaries) return summ def summary_test(names): - with tf.name_scope('summary_test'): + with tf.compat.v1.name_scope('summary_test'): summaries = [] summ_placeholder = [] for name in names: - summ_placeholder.append(tf.placeholder(tf.float32)) - summaries.append(tf.summary.scalar(name, summ_placeholder[-1])) - summ = tf.summary.merge(summaries) + summ_placeholder.append(tf.compat.v1.placeholder(tf.float32)) + summaries.append(tf.compat.v1.summary.scalar(name, summ_placeholder[-1])) + summ = tf.compat.v1.summary.merge(summaries) return summ, summ_placeholder def loss_functions(logit, label_gt, num_class, weight_decay, var_name, label_smoothing=0.0): - with tf.name_scope('loss'): + with tf.compat.v1.name_scope('loss'): loss = softmax_loss(logit, label_gt, num_class, label_smoothing) accu = softmax_accuracy(logit, label_gt) regularizer = l2_regularizer(var_name, weight_decay) return [loss, accu, regularizer] -def loss_functions_seg(logit, label_gt, num_class, weight_decay, var_name, mask=-1): - with tf.name_scope('loss_seg'): - label_mask = label_gt > mask # filter label -1 - masked_logit = tf.boolean_mask(logit, label_mask) - masked_label = 
tf.boolean_mask(label_gt, label_mask) - loss = softmax_loss(masked_logit, masked_label, num_class) +def loss_functions_seg(logit, label_gt, num_class, weight_decay, var_name, mask=-1,infoGain=False): + with tf.compat.v1.name_scope('loss_seg'): + label_mask = tf.greater(label_gt,-1) # filter label -1 + masked_logit = tf.boolean_mask(tensor=logit, mask=label_mask) + masked_label = tf.boolean_mask(tensor=label_gt, mask=label_mask) + loss = softmax_loss(masked_logit, masked_label, num_class,infoGain=infoGain) accu = softmax_accuracy(masked_logit, masked_label) regularizer = l2_regularizer(var_name, weight_decay) + #return [loss, accu, regularizer],masked_logit,masked_label #rp?? return [loss, accu, regularizer] def get_seg_label(octree, depth): - with tf.name_scope('seg_label'): + with tf.compat.v1.name_scope('seg_label'): label = octree_property(octree, property_name='label', dtype=tf.float32, depth=depth, channel=1) label = tf.reshape(tf.cast(label, tf.int32), [-1]) @@ -405,18 +415,18 @@ def run_k_iterations(sess, k, tensors): def tf_IoU_per_shape(pred, label, class_num, mask=-1): - with tf.name_scope('IoU'): + with tf.compat.v1.name_scope('IoU'): label_mask = label > mask # filter label -1 - pred = tf.boolean_mask(pred, label_mask) - label = tf.boolean_mask(label, label_mask) - pred = tf.argmax(pred, axis=1, output_type=tf.int32) + pred = tf.boolean_mask(tensor=pred, mask=label_mask) + label = tf.boolean_mask(tensor=label, mask=label_mask) + pred = tf.argmax(input=pred, axis=1, output_type=tf.int32) IoU, valid_part_num, esp = 0.0, 0.0, 1.0e-10 for k in range(class_num): pk, lk = tf.equal(pred, k), tf.equal(label, k) # pk, lk = pred == k, label == k # why can this not output the right results? 
- intsc = tf.reduce_sum(tf.cast(pk & lk, dtype=tf.float32)) - union = tf.reduce_sum(tf.cast(pk | lk, dtype=tf.float32)) - valid = tf.cast(tf.reduce_any(lk), dtype=tf.float32) + intsc = tf.reduce_sum(input_tensor=tf.cast(pk & lk, dtype=tf.float32)) + union = tf.reduce_sum(input_tensor=tf.cast(pk | lk, dtype=tf.float32)) + valid = tf.cast(tf.reduce_any(input_tensor=lk), dtype=tf.float32) valid_part_num += valid IoU += valid * intsc / (union + esp) IoU /= valid_part_num + esp @@ -430,12 +440,12 @@ def __init__(self, stype='SGD', var_list=None, mul=1.0): self.var_list = var_list def __call__(self, total_loss, learning_rate): - with tf.name_scope('solver'): - update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + with tf.compat.v1.name_scope('solver'): + update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): global_step = tf.Variable(0, trainable=False, name='global_step') lr = learning_rate(global_step) * self.mul - solver = tf.train.MomentumOptimizer(lr, 0.9) \ + solver = tf.compat.v1.train.MomentumOptimizer(lr, 0.9) \ .minimize(total_loss, global_step=global_step, var_list=self.var_list) return solver, lr @@ -443,15 +453,15 @@ def __call__(self, total_loss, learning_rate): def octree2points(octree, depth, pts_channel=4, output_normal=False): - with tf.name_scope('octree2points'): + with tf.compat.v1.name_scope('octree2points'): signal = octree_signal(octree, depth, 4) # normal and displacement - signal = tf.transpose(tf.squeeze(signal, [0, 3])) # (1, C, H, 1) -> (H, C) + signal = tf.transpose(a=tf.squeeze(signal, [0, 3])) # (1, C, H, 1) -> (H, C) xyz = octree_xyz(octree, depth) xyz = tf.cast(xyz, dtype=tf.float32) mask = octree_child(octree, depth) > -1 - signal = tf.boolean_mask(signal, mask) - xyz = tf.boolean_mask(xyz, mask) + signal = tf.boolean_mask(tensor=signal, mask=mask) + xyz = tf.boolean_mask(tensor=xyz, mask=mask) c = 3.0 ** 0.5 / 2.0 normal, dis = tf.split(signal, [3, 1], axis=1) diff 
--git a/tensorflow/script/readme NNCAD changes.txt b/tensorflow/script/readme NNCAD changes.txt new file mode 100644 index 00000000..c7b83caf --- /dev/null +++ b/tensorflow/script/readme NNCAD changes.txt @@ -0,0 +1,12 @@ +The folder contains modified OCNN tensorflow script files: + +network_aenet.py This is the caffe voxel labeling network implemented in tensorflow + + + +tfsolver.py: Reads number of tfrecords in the test set. + Contains a declaration of a global list used in the command to load tensors used in debugging. + And the line change to the session run to extract these tensors. + + +ocnn.py: Contains a change to the loss function replacing > with tf.greater to fix OCNN bug diff --git a/tensorflow/script/run_ae.py b/tensorflow/script/run_ae.py index 81421b31..58e15d8f 100644 --- a/tensorflow/script/run_ae.py +++ b/tensorflow/script/run_ae.py @@ -21,7 +21,7 @@ def compute_graph(dataset='train', training=True, reuse=False): code = autoencoder.octree_encoder(octree, training, reuse) loss, accu = autoencoder.octree_decoder(code, octree, training, reuse) - with tf.name_scope('total_loss'): + with tf.compat.v1.name_scope('total_loss'): reg = l2_regularizer('ocnn', FLAGS.LOSS.weight_decay) total_loss = tf.add_n(loss + [reg]) tensors = loss + [reg] + accu + [total_loss] @@ -41,17 +41,17 @@ def decode_shape(self): # checkpoint assert(self.flags.ckpt) # the self.flags.ckpt should be provided - tf_saver = tf.train.Saver(max_to_keep=20) + tf_saver = tf.compat.v1.train.Saver(max_to_keep=20) # start - config = tf.ConfigProto() + config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True - with tf.Session(config=config) as sess: + with tf.compat.v1.Session(config=config) as sess: # restore and initialize self.initialize(sess) tf_saver.restore(sess, self.flags.ckpt) logdir = self.flags.logdir - tf.summary.FileWriter(logdir, sess.graph) + tf.compat.v1.summary.FileWriter(logdir, sess.graph) print('Start testing ...') for i in tqdm(range(0,
self.flags.test_iter)): diff --git a/tensorflow/script/run_cls.py b/tensorflow/script/run_cls.py index eeac1d63..f992fcca 100644 --- a/tensorflow/script/run_cls.py +++ b/tensorflow/script/run_cls.py @@ -21,7 +21,7 @@ def __call__(self, dataset='train', training=True, reuse=False, gpu_num=1): tower_tensors = [] for i in range(gpu_num): with tf.device('/gpu:%d' % i): - with tf.name_scope('device_%d' % i): + with tf.compat.v1.name_scope('device_%d' % i): octree, label = data_iter.get_next() logit = cls_network(octree, FLAGS.MODEL, training, reuse) losses = loss_functions(logit, label, FLAGS.LOSS.num_class, diff --git a/tensorflow/script/run_cls_finetune.py b/tensorflow/script/run_cls_finetune.py index 379f6572..ac0d3af5 100644 --- a/tensorflow/script/run_cls_finetune.py +++ b/tensorflow/script/run_cls_finetune.py @@ -51,7 +51,7 @@ def restore(self, sess, ckpt): print('Restore from: ' + ckpt) var_restore = get_variables_with_name( 'ocnn', without='fc2', verbose=self.flags.verbose, train_only=False) - tf_saver = tf.train.Saver(var_list=var_restore) + tf_saver = tf.compat.v1.train.Saver(var_list=var_restore) tf_saver.restore(sess, ckpt) diff --git a/tensorflow/script/run_completion.py b/tensorflow/script/run_completion.py index ea715167..dd33badf 100644 --- a/tensorflow/script/run_completion.py +++ b/tensorflow/script/run_completion.py @@ -52,13 +52,13 @@ def gen_scan_axis(self, i): def __call__(self, record_names, batch_size, shuffle_size=1000, return_iter=False, take=-1, **kwargs): - with tf.name_scope('points_dataset'): + with tf.compat.v1.name_scope('points_dataset'): def preprocess(record): points, label = self.parse_example(record) points = self.normalize_points(points) points = self.transform_points(points) octree1 = self.points2octree(points) # the complete octree - scan_axis = tf.py_func(self.gen_scan_axis, [label], tf.float32) + scan_axis = tf.compat.v1.py_func(self.gen_scan_axis, [label], tf.float32) octree0 = octree_scan(octree1, scan_axis) # the transformed 
octree return octree0, octree1 @@ -70,9 +70,9 @@ def merge_octrees(octrees0, octrees1, *args): dataset = tf.data.TFRecordDataset(record_names).take(take).repeat() if shuffle_size > 1: dataset = dataset.shuffle(shuffle_size) - itr = dataset.map(preprocess, num_parallel_calls=8) \ + itr = tf.compat.v1.data.make_one_shot_iterator(dataset.map(preprocess, num_parallel_calls=8) \ .batch(batch_size).map(merge_octrees, num_parallel_calls=8) \ - .prefetch(8).make_one_shot_iterator() + .prefetch(8)) return itr if return_iter else itr.get_next() @@ -87,7 +87,7 @@ def compute_graph(dataset='train', training=True, reuse=False): convd = network.octree_encoder(octree0, training, reuse) loss, accu = network.octree_decoder(convd, octree0, octree1, training, reuse) - with tf.name_scope('total_loss'): + with tf.compat.v1.name_scope('total_loss'): reg = l2_regularizer('ocnn', FLAGS.LOSS.weight_decay) total_loss = tf.add_n(loss + [reg]) tensors = loss + [reg] + accu + [total_loss] @@ -110,18 +110,18 @@ def decode_shape(self): # checkpoint assert(self.flags.ckpt) - tf_saver = tf.train.Saver(max_to_keep=20) + tf_saver = tf.compat.v1.train.Saver(max_to_keep=20) # start - config = tf.ConfigProto() + config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True - with tf.Session(config=config) as sess: + with tf.compat.v1.Session(config=config) as sess: # restore and initialize self.initialize(sess) print('Load check point: ' + self.flags.ckpt) tf_saver.restore(sess, self.flags.ckpt) logdir = self.flags.logdir - tf.summary.FileWriter(logdir, sess.graph) + tf.compat.v1.summary.FileWriter(logdir, sess.graph) print('Start testing ...') for i in tqdm(range(0, self.flags.test_iter), ncols=80): diff --git a/tensorflow/script/run_linear_cls.py b/tensorflow/script/run_linear_cls.py index 9a018a92..c441842d 100644 --- a/tensorflow/script/run_linear_cls.py +++ b/tensorflow/script/run_linear_cls.py @@ -17,15 +17,15 @@ def __init__(self, flags): self.label = np.load('%s_%s.npy' % 
(flags.location, flags.y_alias)) def __call__(self): - with tf.name_scope('dataset'): + with tf.compat.v1.name_scope('dataset'): channel = self.data.shape[1] - self.data_ph = tf.placeholder(dtype=tf.float32, shape=[None, channel]) - self.label_ph = tf.placeholder(dtype=tf.int64) + self.data_ph = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, channel]) + self.label_ph = tf.compat.v1.placeholder(dtype=tf.int64) dataset = tf.data.Dataset.from_tensor_slices((self.data_ph, self.label_ph)) if self.flags.shuffle > 1: dataset = dataset.shuffle(self.flags.shuffle) dataset = dataset.batch(self.flags.batch_size).repeat() - self.iter = dataset.make_initializable_iterator() + self.iter = tf.compat.v1.data.make_initializable_iterator(dataset) return self.iter.get_next() def feed_data(self, sess): @@ -43,7 +43,7 @@ def compute_graph(dataset='train', training=True, reuse=False): numpy_dataset = train_dataset if dataset == 'train' else test_dataset data, label = numpy_dataset() # define the linear classifier - with tf.variable_scope('linear', reuse=reuse): + with tf.compat.v1.variable_scope('linear', reuse=reuse): # TODO: Check that whether we need a BN here # data = tf.layers.batch_normalization(data, axis=1, training=training) logit = dense(data, FLAGS.MODEL.nout, use_bias=True) @@ -58,7 +58,7 @@ def compute_graph(dataset='train', training=True, reuse=False): # define the solver class LTFSolver(TFSolver): def initialize(self, sess): - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) train_dataset.feed_data(sess) test_dataset.feed_data(sess) diff --git a/tensorflow/script/run_mid.py b/tensorflow/script/run_mid.py index 8435b91e..13623255 100644 --- a/tensorflow/script/run_mid.py +++ b/tensorflow/script/run_mid.py @@ -16,21 +16,21 @@ # get the label and mask def get_point_info(octree, depth, mask_ratio=0): - with tf.name_scope('points_info'): + with tf.compat.v1.name_scope('points_info'): point_id = get_seg_label(octree, 
depth) point_segment = tf.reshape(octree_property(octree, property_name='index', dtype=tf.int32, depth=depth, channel=1), [-1]) mask = point_id > -1 # Filter out label -1 if mask_ratio > 0: - mask_shape = tf.shape(mask) + mask_shape = tf.shape(input=mask) mask = tf.logical_and(mask, tf.random.uniform(mask_shape) > mask_ratio) - point_id = tf.boolean_mask(point_id, mask) - point_segment = tf.boolean_mask(point_segment, mask) + point_id = tf.boolean_mask(tensor=point_id, mask=mask) + point_segment = tf.boolean_mask(tensor=point_segment, mask=mask) return point_id, point_segment, mask def compute_graph(reuse=False): - with tf.name_scope('dataset'): + with tf.compat.v1.name_scope('dataset'): flags_data = FLAGS.DATA.train batch_size = flags_data.batch_size octree, shape_id = DatasetFactory(flags_data)() diff --git a/tensorflow/script/run_seg_partnet.py b/tensorflow/script/run_seg_partnet.py index bcf39123..65417688 100644 --- a/tensorflow/script/run_seg_partnet.py +++ b/tensorflow/script/run_seg_partnet.py @@ -13,33 +13,33 @@ # get the label and pts def get_point_info(points, mask_ratio=0, mask=-1): - with tf.name_scope('points_info'): + with tf.compat.v1.name_scope('points_info'): pts = points_property(points, property_name='xyz', channel=4) label = points_property(points, property_name='label', channel=1) label = tf.reshape(label, [-1]) label_mask = label > mask # mask out invalid points, -1 if mask_ratio > 0: # random drop some points to speed up training - rnd_mask = tf.random.uniform(tf.shape(label_mask)) > mask_ratio + rnd_mask = tf.random.uniform(tf.shape(input=label_mask)) > mask_ratio label_mask = tf.logical_and(label_mask, rnd_mask) - pts = tf.boolean_mask(pts, label_mask) - label = tf.boolean_mask(label, label_mask) + pts = tf.boolean_mask(tensor=pts, mask=label_mask) + label = tf.boolean_mask(tensor=label, mask=label_mask) return pts, label # IoU def tf_IoU_per_shape(pred, label, class_num, mask=-1): - with tf.name_scope('IoU'): + with 
tf.compat.v1.name_scope('IoU'): # Set mask to 0 to filter unlabeled points, whose label is 0 label_mask = label > mask # mask out label - pred = tf.boolean_mask(pred, label_mask) - label = tf.boolean_mask(label, label_mask) - pred = tf.argmax(pred, axis=1, output_type=tf.int32) + pred = tf.boolean_mask(tensor=pred, mask=label_mask) + label = tf.boolean_mask(tensor=label, mask=label_mask) + pred = tf.argmax(input=pred, axis=1, output_type=tf.int32) intsc, union = [None] * class_num, [None] * class_num for k in range(class_num): pk, lk = tf.equal(pred, k), tf.equal(label, k) - intsc[k] = tf.reduce_sum(tf.cast(pk & lk, dtype=tf.float32)) - union[k] = tf.reduce_sum(tf.cast(pk | lk, dtype=tf.float32)) + intsc[k] = tf.reduce_sum(input_tensor=tf.cast(pk & lk, dtype=tf.float32)) + union[k] = tf.reduce_sum(input_tensor=tf.cast(pk | lk, dtype=tf.float32)) return intsc, union @@ -60,7 +60,7 @@ def __call__(self, dataset='train', training=True, reuse=False, gpu_num=1): tower_tensors = [] for i in range(gpu_num): with tf.device('/gpu:%d' % i): - with tf.name_scope('device_%d' % i): + with tf.compat.v1.name_scope('device_%d' % i): octree, _, points = data_iter.get_next() pts, label = get_point_info(points, flags_data.mask_ratio) if not FLAGS.LOSS.point_wise: diff --git a/tensorflow/script/run_seg_partnet_finetune.py b/tensorflow/script/run_seg_partnet_finetune.py index 129584cd..397cceea 100644 --- a/tensorflow/script/run_seg_partnet_finetune.py +++ b/tensorflow/script/run_seg_partnet_finetune.py @@ -55,7 +55,7 @@ def restore(self, sess, ckpt): print('Restore from: ' + ckpt) var_restore = get_variables_with_name( 'ocnn', without='predict_6/conv2', verbose=self.flags.verbose, train_only=False) - tf_saver = tf.train.Saver(var_list=var_restore) + tf_saver = tf.compat.v1.train.Saver(var_list=var_restore) tf_saver.restore(sess, ckpt) diff --git a/tensorflow/script/run_seg_shapenet.py b/tensorflow/script/run_seg_shapenet.py index dc5951b9..5540e685 100644 --- 
a/tensorflow/script/run_seg_shapenet.py +++ b/tensorflow/script/run_seg_shapenet.py @@ -10,16 +10,16 @@ # get the label and pts def get_point_info(points, mask_ratio=0, mask=-1): - with tf.name_scope('points_info'): + with tf.compat.v1.name_scope('points_info'): pts = points_property(points, property_name='xyz', channel=4) label = points_property(points, property_name='label', channel=1) label = tf.reshape(label, [-1]) label_mask = label > mask # mask out invalid points, -1 if mask_ratio > 0: # random drop some points to speed up training - rnd_mask = tf.random.uniform(tf.shape(label_mask)) > mask_ratio + rnd_mask = tf.random.uniform(tf.shape(input=label_mask)) > mask_ratio label_mask = tf.logical_and(label_mask, rnd_mask) - pts = tf.boolean_mask(pts, label_mask) - label = tf.boolean_mask(label, label_mask) + pts = tf.boolean_mask(tensor=pts, mask=label_mask) + label = tf.boolean_mask(tensor=label, mask=label_mask) return pts, label diff --git a/tensorflow/script/run_seg_shapenet_finetune.py b/tensorflow/script/run_seg_shapenet_finetune.py index 92ee2362..cf4a875a 100644 --- a/tensorflow/script/run_seg_shapenet_finetune.py +++ b/tensorflow/script/run_seg_shapenet_finetune.py @@ -51,7 +51,7 @@ def restore(self, sess, ckpt): print('Restore from: ' + ckpt) var_restore = get_variables_with_name( 'ocnn', without='predict_6/conv2', verbose=self.flags.verbose, train_only=False) - tf_saver = tf.train.Saver(var_list=var_restore) + tf_saver = tf.compat.v1.train.Saver(var_list=var_restore) tf_saver.restore(sess, ckpt) diff --git a/tensorflow/script/tfsolver.py b/tensorflow/script/tfsolver.py index bb5a599f..91ee5ddd 100644 --- a/tensorflow/script/tfsolver.py +++ b/tensorflow/script/tfsolver.py @@ -7,10 +7,13 @@ tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) +global glblab + class TFSolver: def __init__(self, flags, compute_graph=None, build_solver=build_solver): self.flags = flags.SOLVER + self.flagsD=flags.DATA self.graph = compute_graph self.build_solver = 
build_solver @@ -27,8 +30,8 @@ def build_train_graph(self): total_loss = self.train_tensors[train_names.index('total_loss')] solver_param = [total_loss, LRFactory(self.flags)] - if gpu_num > 1: - solver_param.append(gpu_num) + # if gpu_num > 1: + # solver_param.append(gpu_num) self.train_op, lr = self.build_solver(*solver_param) if gpu_num > 1: # average the tensors from different gpus for summaries @@ -64,7 +67,7 @@ def restore(self, sess, ckpt): self.tf_saver.restore(sess, ckpt) def initialize(self, sess): - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) def run_k_iterations(self, sess, k, tensors): num = len(tensors) @@ -82,13 +85,17 @@ def run_k_iterations(self, sess, k, tensors): def result_callback(self, avg_results): return avg_results # calc some metrics, such as IoU, based on the graph output + def len_callback(self): + return 0 #rp added to calc output length + def train(self): # build the computation graph + with tf.Graph().as_default(): self.build_train_graph() # checkpoint start_iter = 1 - self.tf_saver = tf.train.Saver(max_to_keep=self.flags.ckpt_num) + self.tf_saver = tf.compat.v1.train.Saver(max_to_keep=self.flags.ckpt_num) ckpt_path = os.path.join(self.flags.logdir, 'model') if self.flags.ckpt: # restore from the provided checkpoint ckpt = self.flags.ckpt @@ -97,10 +104,11 @@ def train(self): if ckpt: start_iter = int(ckpt[ckpt.find("iter")+5:-5]) + 1 # session - config = tf.ConfigProto(allow_soft_placement=True) + config = tf.compat.v1.ConfigProto(allow_soft_placement=True) config.gpu_options.allow_growth = True - with tf.Session(config=config) as sess: - summary_writer = tf.summary.FileWriter(self.flags.logdir, sess.graph) + + with tf.compat.v1.Session(config=config) as sess: + summary_writer = tf.compat.v1.summary.FileWriter(self.flags.logdir, sess.graph) print('Initialize ...') self.initialize(sess) @@ -109,6 +117,7 @@ def train(self): print('Start training ...') for i in 
tqdm(range(start_iter, self.flags.max_iter + 1), ncols=80): # training + #summary, _,pntLab,seqNo,logit = sess.run([self.summ_train, self.train_op] + TFSolver.glblab) summary, _ = sess.run([self.summ_train, self.train_op]) summary_writer.add_summary(summary, i) @@ -134,13 +143,13 @@ def timeline(self): self.build_train_graph() # session - config = tf.ConfigProto(allow_soft_placement=True) + config = tf.compat.v1.ConfigProto(allow_soft_placement=True) config.gpu_options.allow_growth = True - options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) - run_metadata = tf.RunMetadata() + options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE) + run_metadata = tf.compat.v1.RunMetadata() timeline_skip, timeline_iter = 100, 2 - with tf.Session(config=config) as sess: - summary_writer = tf.summary.FileWriter(self.flags.logdir, sess.graph) + with tf.compat.v1.Session(config=config) as sess: + summary_writer = tf.compat.v1.summary.FileWriter(self.flags.logdir, sess.graph) print('Initialize ...') self.initialize(sess) @@ -166,7 +175,7 @@ def param_stats(self): self.build_train_graph() # get variables - train_vars = tf.trainable_variables() + train_vars = tf.compat.v1.trainable_variables() # print total_num = 0 @@ -184,15 +193,15 @@ def test(self): # checkpoint assert(self.flags.ckpt) # the self.flags.ckpt should be provided - tf_saver = tf.train.Saver(max_to_keep=10) + tf_saver = tf.compat.v1.train.Saver(max_to_keep=10) # start num_tensors = len(self.test_tensors) avg_test = [0] * num_tensors - config = tf.ConfigProto(allow_soft_placement=True) - config.gpu_options.allow_growth = True - with tf.Session(config=config) as sess: - summary_writer = tf.summary.FileWriter(self.flags.logdir, sess.graph) + config = tf.compat.v1.ConfigProto(allow_soft_placement=True) + config.gpu_options.allow_growth = True + with tf.compat.v1.Session(config=config) as sess: + summary_writer = tf.compat.v1.summary.FileWriter(self.flags.logdir, sess.graph) 
self.summ2txt(self.test_names, 'batch') # restore and initialize @@ -201,7 +210,13 @@ def test(self): tf_saver.restore(sess, self.flags.ckpt) print('Start testing ...') - for i in range(0, self.flags.test_iter): + itCnt=self.flags.test_iter #rp** + if itCnt==0: + itCnt=self.len_callback() + if itCnt==0: + itCnt=sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(self.flagsD.test.location)) + + for i in range(0, itCnt): iter_test_result = sess.run(self.test_tensors) iter_test_result = self.result_callback(iter_test_result) # run testing average @@ -216,7 +231,7 @@ def test(self): # Final testing results for j in range(num_tensors): - avg_test[j] /= self.flags.test_iter + avg_test[j] /= itCnt avg_test = self.result_callback(avg_test) # print the results print('Testing done!\n') diff --git a/tensorflow/script_report.txt b/tensorflow/script_report.txt new file mode 100644 index 00000000..84fea532 --- /dev/null +++ b/tensorflow/script_report.txt @@ -0,0 +1,742 @@ +TensorFlow 2.0 Upgrade Script +----------------------------- +Converted 29 files +Detected 16 issues that require attention +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +File: script_14/dataset.py +-------------------------------------------------------------------------------- +script_14/dataset.py:146:12: WARNING: Changing dataset.make_one_shot_iterator() to tf.compat.v1.data.make_one_shot_iterator(dataset). Please check this transformation. + +script_14/dataset.py:163:12: WARNING: Changing dataset.make_one_shot_iterator() to tf.compat.v1.data.make_one_shot_iterator(dataset). Please check this transformation. + +script_14/dataset.py:183:12: WARNING: Changing dataset.make_one_shot_iterator() to tf.compat.v1.data.make_one_shot_iterator(dataset). Please check this transformation. 
+ +-------------------------------------------------------------------------------- +File: script_14/feature.py +-------------------------------------------------------------------------------- +script_14/feature.py:60:2: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +script_14/feature.py:61:2: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +script_14/feature.py:62:2: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +script_14/feature.py:70:4: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +script_14/feature.py:71:4: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +script_14/feature.py:72:4: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. 
To continue saving to HDF5, add the argument save_format='h5' to the save() function. +-------------------------------------------------------------------------------- +File: script_14/mid_loss.py +-------------------------------------------------------------------------------- +script_14/mid_loss.py:11:20: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +script_14/mid_loss.py:61:20: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +-------------------------------------------------------------------------------- +File: script_14/ocnn.py +-------------------------------------------------------------------------------- +script_14/ocnn.py:58:14: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +script_14/ocnn.py:62:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). 
+-------------------------------------------------------------------------------- +File: script_14/run_completion.py +-------------------------------------------------------------------------------- +script_14/run_completion.py:73:12: WARNING: Changing dataset.make_one_shot_iterator() to tf.compat.v1.data.make_one_shot_iterator(dataset). Please check this transformation. + +-------------------------------------------------------------------------------- +File: script_14/run_linear_cls.py +-------------------------------------------------------------------------------- +script_14/run_linear_cls.py:28:18: WARNING: Changing dataset.make_initializable_iterator() to tf.compat.v1.data.make_initializable_iterator(dataset). Please check this transformation. + +-------------------------------------------------------------------------------- +File: script_14/tfsolver.py +-------------------------------------------------------------------------------- +script_14/tfsolver.py:136:10: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. 
+================================================================================ +Detailed log follows: + +================================================================================ +================================================================================ +Input tree: 'script_14/' +================================================================================ +-------------------------------------------------------------------------------- +Processing file 'script_14/config.py' + outputting to 'script/config.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/dataset.py' + outputting to 'script/dataset.py' +-------------------------------------------------------------------------------- + +12:37: INFO: Renamed 'tf.FixedLenFeature' to 'tf.io.FixedLenFeature' +13:37: INFO: Renamed 'tf.FixedLenFeature' to 'tf.io.FixedLenFeature' +16:13: INFO: Added keywords to args of function 'tf.parse_single_example' +16:13: INFO: Renamed 'tf.parse_single_example' to 'tf.io.parse_single_example' +24:38: INFO: Renamed 'tf.FixedLenFeature' to 'tf.io.FixedLenFeature' +25:38: INFO: Renamed 'tf.FixedLenFeature' to 'tf.io.FixedLenFeature' +26:32: INFO: Renamed 'tf.FixedLenFeature' to 'tf.io.FixedLenFeature' +29:13: INFO: Added keywords to args of function 'tf.parse_single_example' +29:13: INFO: Renamed 'tf.parse_single_example' to 'tf.io.parse_single_example' +130:9: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +130:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +146:12: WARNING: Changing dataset.make_one_shot_iterator() to tf.compat.v1.data.make_one_shot_iterator(dataset). 
Please check this transformation. + +157:9: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +157:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +163:12: WARNING: Changing dataset.make_one_shot_iterator() to tf.compat.v1.data.make_one_shot_iterator(dataset). Please check this transformation. + +174:9: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +174:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +183:12: WARNING: Changing dataset.make_one_shot_iterator() to tf.compat.v1.data.make_one_shot_iterator(dataset). Please check this transformation. + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/feature.py' + outputting to 'script/feature.py' +-------------------------------------------------------------------------------- + +20:5: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +60:2: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +61:2: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +62:2: WARNING: *.save requires manual check. 
(This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +70:4: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +71:4: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +72:4: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +75:11: INFO: Renamed 'tf.train.Saver' to 'tf.compat.v1.train.Saver' +76:9: INFO: Renamed 'tf.ConfigProto' to 'tf.compat.v1.ConfigProto' +78:5: INFO: Renamed 'tf.Session' to 'tf.compat.v1.Session' +79:2: INFO: tf.summary.FileWriter requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. 
+79:2: INFO: Renamed 'tf.summary.FileWriter' to 'tf.compat.v1.summary.FileWriter' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/learning_rate.py' + outputting to 'script/learning_rate.py' +-------------------------------------------------------------------------------- + +9:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +16:14: INFO: Renamed 'tf.floordiv' to 'tf.math.floordiv' +27:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +37:11: INFO: Renamed 'tf.train.piecewise_constant' to 'tf.compat.v1.train.piecewise_constant' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/mid_loss.py' + outputting to 'script/mid_loss.py' +-------------------------------------------------------------------------------- + +10:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +11:20: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +11:20: INFO: Renamed 'tf.get_variable' to 'tf.compat.v1.get_variable' +13:22: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +16:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +20:14: INFO: Renamed 'tf.div' to 'tf.compat.v1.div' +25:9: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. 
+ +25:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +33:11: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +33:11: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +38:17: INFO: Renamed 'tf.scatter_update' to 'tf.compat.v1.scatter_update' +42:9: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +42:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +48:27: INFO: Added keywords to args of function 'tf.reduce_sum' +49:19: INFO: Added keywords to args of function 'tf.argmax' +60:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +61:20: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +61:20: INFO: Renamed 'tf.get_variable' to 'tf.compat.v1.get_variable' +63:22: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +70:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +75:18: INFO: Renamed 'tf.segment_sum' to 'tf.math.segment_sum' +87:14: INFO: Renamed 'tf.div' to 'tf.compat.v1.div' +93:9: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +93:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +105:11: INFO: `name` passed to `name_scope`. 
Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +105:11: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +114:18: INFO: Renamed 'tf.unsorted_segment_mean' to 'tf.math.unsorted_segment_mean' +122:17: INFO: Renamed 'tf.scatter_update' to 'tf.compat.v1.scatter_update' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/network_ae.py' + outputting to 'script/network_ae.py' +-------------------------------------------------------------------------------- + +13:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +14:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +20:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +24:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +27:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +36:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +38:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +44:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +50:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +55:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +59:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +62:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +63:18: INFO: Added keywords to args of function 'tf.transpose' +68:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +78:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +79:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +82:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +84:11: INFO: Renamed 
'tf.variable_scope' to 'tf.compat.v1.variable_scope' +88:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +92:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +96:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +99:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +103:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +106:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +110:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +122:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +123:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +128:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +133:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +135:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +139:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +149:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +154:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +157:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +159:18: INFO: Added keywords to args of function 'tf.transpose' +161:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +162:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +170:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +173:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +174:17: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +181:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +190:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +191:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +194:11: INFO: Renamed 'tf.variable_scope' to 
'tf.compat.v1.variable_scope' +196:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +202:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +205:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +208:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +211:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +214:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +218:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +222:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/network_aenet.py' + outputting to 'script/network_aenet.py' +-------------------------------------------------------------------------------- + +11:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +12:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +21:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +25:20: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +34:11: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +38:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +45:19: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +50:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +52:20: INFO: Added keywords to args of function 'tf.transpose' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/network_cls.py' + outputting to 'script/network_cls.py' +-------------------------------------------------------------------------------- + +9:7: INFO: Renamed 
'tf.variable_scope' to 'tf.compat.v1.variable_scope' +14:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +19:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +21:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +24:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +26:13: INFO: Added keywords to args of function 'tf.reduce_mean' +29:13: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +31:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +41:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +47:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +51:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +53:13: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +55:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +57:13: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +59:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/network_completion.py' + outputting to 'script/network_completion.py' +-------------------------------------------------------------------------------- + +30:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +31:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +34:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +40:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +45:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +56:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +59:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +68:17: INFO: Renamed 'tf.variable_scope' 
to 'tf.compat.v1.variable_scope' +72:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +75:18: INFO: Added keywords to args of function 'tf.transpose' +77:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +78:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +84:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +87:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +88:17: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +97:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +99:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +102:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +104:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +109:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +118:17: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +122:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +125:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +128:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +131:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +135:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/network_factory.py' + outputting to 'script/network_factory.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/network_hrnet.py' + outputting to 'script/network_hrnet.py' 
+-------------------------------------------------------------------------------- + +19:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +27:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +39:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +42:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +44:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +48:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +62:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +65:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +67:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +71:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +89:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +101:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +108:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +112:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +118:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +124:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +130:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +136:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +159:16: INFO: Added keywords to args of function 'tf.boolean_mask' +163:18: INFO: Added keywords to args of function 'tf.boolean_mask' +166:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +168:14: INFO: Added keywords to args of function 'tf.transpose' +185:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +187:14: INFO: Added keywords to args of function 'tf.transpose' +196:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +211:11: INFO: Renamed 'tf.variable_scope' to 
'tf.compat.v1.variable_scope' +213:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +222:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +228:12: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +230:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +240:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +254:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +262:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +264:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +270:17: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +274:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/network_hrnet4t6.py' + outputting to 'script/network_hrnet4t6.py' +-------------------------------------------------------------------------------- + +19:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +27:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +39:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +42:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +44:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +48:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +62:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +65:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +67:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +71:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +89:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +101:9: INFO: Renamed 'tf.variable_scope' to 
'tf.compat.v1.variable_scope' +108:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +112:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +118:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +124:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +130:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +136:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +161:18: INFO: Added keywords to args of function 'tf.boolean_mask' +164:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +166:14: INFO: Added keywords to args of function 'tf.transpose' +183:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +185:14: INFO: Added keywords to args of function 'tf.transpose' +194:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +209:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +211:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +220:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +226:12: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +228:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +238:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +252:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +260:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +262:13: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +267:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/network_unet.py' + outputting to 'script/network_unet.py' +-------------------------------------------------------------------------------- + 
+15:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +16:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +25:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +34:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +37:15: INFO: Renamed 'tf.layers.dropout' to 'tf.compat.v1.layers.dropout' +41:11: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +56:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +61:15: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +63:20: INFO: Added keywords to args of function 'tf.transpose' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/ocnn.py' + outputting to 'script/ocnn.py' +-------------------------------------------------------------------------------- + +11:11: INFO: Renamed 'tf.trainable_variables' to 'tf.compat.v1.trainable_variables' +11:55: INFO: Renamed 'tf.all_variables' to 'tf.compat.v1.all_variables' +26:11: INFO: Renamed 'tf.layers.flatten' to 'tf.compat.v1.layers.flatten' +27:7: INFO: Renamed 'tf.layers.dense' to 'tf.compat.v1.layers.dense' +28:42: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +33:9: INFO: Renamed 'tf.layers.batch_normalization' to 'tf.compat.v1.layers.batch_normalization' +43:9: INFO: Renamed 'tf.layers.conv2d' to 'tf.compat.v1.layers.conv2d' +45:45: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +49:12: INFO: Renamed 'tf.layers.conv2d' to 'tf.compat.v1.layers.conv2d' +51:48: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. 
+ +56:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +58:14: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +58:14: INFO: Renamed 'tf.get_variable' to 'tf.compat.v1.get_variable' +59:60: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +62:13: WARNING: tf.get_variable requires manual check. tf.get_variable returns ResourceVariables by default in 2.0, which have well-defined semantics and are stricter about shapes. You can disable this behavior by passing use_resource=False, or by calling tf.compat.v1.disable_resource_variables(). +62:13: INFO: Renamed 'tf.get_variable' to 'tf.compat.v1.get_variable' +63:41: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +90:11: INFO: Renamed 'tf.layers.conv2d_transpose' to 'tf.compat.v1.layers.conv2d_transpose' +92:57: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. + +93:7: INFO: Renamed 'tf.layers.batch_normalization' to 'tf.compat.v1.layers.batch_normalization' +98:11: INFO: Renamed 'tf.layers.conv2d' to 'tf.compat.v1.layers.conv2d' +100:47: INFO: Changing tf.contrib.layers xavier initializer to a tf.compat.v1.keras.initializers.VarianceScaling and converting arguments. 
+ +101:7: INFO: Renamed 'tf.layers.batch_normalization' to 'tf.compat.v1.layers.batch_normalization' +106:9: INFO: Added keywords to args of function 'tf.nn.avg_pool2d' +106:9: INFO: Renamed keyword argument for tf.nn.avg_pool2d from value to input +112:9: INFO: Added keywords to args of function 'tf.reduce_mean' +117:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +130:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +143:9: INFO: Renamed 'tf.layers.batch_normalization' to 'tf.compat.v1.layers.batch_normalization' +148:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +168:9: INFO: Renamed 'tf.layers.batch_normalization' to 'tf.compat.v1.layers.batch_normalization' +173:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +187:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +190:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +193:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +198:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +206:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +208:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +213:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +222:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +224:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +232:10: INFO: Added keywords to args of function 'tf.argmax' +242:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +242:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +252:13: INFO: tf.losses.sparse_softmax_cross_entropy requires manual check. tf.losses have been replaced with object oriented versions in TF 2.0 and after. 
The loss function calls have been converted to compat.v1 for backward compatibility. Please update these calls to the TF 2.0 versions. +252:13: INFO: Renamed 'tf.losses.sparse_softmax_cross_entropy' to 'tf.compat.v1.losses.sparse_softmax_cross_entropy' +255:13: INFO: tf.losses.softmax_cross_entropy requires manual check. tf.losses have been replaced with object oriented versions in TF 2.0 and after. The loss function calls have been converted to compat.v1 for backward compatibility. Please update these calls to the TF 2.0 versions. +255:13: INFO: Renamed 'tf.losses.softmax_cross_entropy' to 'tf.compat.v1.losses.softmax_cross_entropy' +260:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +260:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +268:13: INFO: Added keywords to args of function 'tf.reduce_mean' +268:28: INFO: Changed tf.to_float call to tf.cast(..., dtype=tf.float32). +273:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +273:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +274:14: INFO: Added keywords to args of function 'tf.argmax' +280:9: INFO: Added keywords to args of function 'tf.reduce_mean' +280:24: INFO: Added keywords to args of function 'tf.reduce_sum' +286:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +299:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. 
+ +299:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +303:19: INFO: Added keywords to args of function 'tf.reduce_mean' +309:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +310:17: INFO: Renamed 'tf.get_collection' to 'tf.compat.v1.get_collection' +310:35: INFO: Renamed 'tf.GraphKeys' to 'tf.compat.v1.GraphKeys' +314:15: INFO: Renamed 'tf.train.MomentumOptimizer' to 'tf.compat.v1.train.MomentumOptimizer' +322:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +325:12: INFO: Renamed 'tf.train.MomentumOptimizer' to 'tf.compat.v1.train.MomentumOptimizer' +329:11: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +329:11: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +334:15: INFO: Renamed 'tf.get_collection' to 'tf.compat.v1.get_collection' +334:33: INFO: Renamed 'tf.GraphKeys' to 'tf.compat.v1.GraphKeys' +340:9: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +340:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +356:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +356:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +359:23: INFO: tf.summary.scalar requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). 
For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +359:23: INFO: Renamed 'tf.summary.scalar' to 'tf.compat.v1.summary.scalar' +360:11: INFO: tf.summary.merge requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +360:11: INFO: Renamed 'tf.summary.merge' to 'tf.compat.v1.summary.merge' +365:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +365:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +369:30: INFO: Renamed 'tf.placeholder' to 'tf.compat.v1.placeholder' +370:23: INFO: tf.summary.scalar requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +370:23: INFO: Renamed 'tf.summary.scalar' to 'tf.compat.v1.summary.scalar' +371:11: INFO: tf.summary.merge requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). 
For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +371:11: INFO: Renamed 'tf.summary.merge' to 'tf.compat.v1.summary.merge' +376:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +376:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +384:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +384:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +386:19: INFO: Added keywords to args of function 'tf.boolean_mask' +387:19: INFO: Added keywords to args of function 'tf.boolean_mask' +397:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +397:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +418:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +418:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +420:11: INFO: Added keywords to args of function 'tf.boolean_mask' +421:12: INFO: Added keywords to args of function 'tf.boolean_mask' +422:11: INFO: Added keywords to args of function 'tf.argmax' +427:14: INFO: Added keywords to args of function 'tf.reduce_sum' +428:14: INFO: Added keywords to args of function 'tf.reduce_sum' +429:22: INFO: Added keywords to args of function 'tf.reduce_any' +443:9: INFO: `name` passed to `name_scope`. 
Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +443:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +444:19: INFO: Renamed 'tf.get_collection' to 'tf.compat.v1.get_collection' +444:37: INFO: Renamed 'tf.GraphKeys' to 'tf.compat.v1.GraphKeys' +448:17: INFO: Renamed 'tf.train.MomentumOptimizer' to 'tf.compat.v1.train.MomentumOptimizer' +456:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +456:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +458:13: INFO: Added keywords to args of function 'tf.transpose' +463:13: INFO: Added keywords to args of function 'tf.boolean_mask' +464:10: INFO: Added keywords to args of function 'tf.boolean_mask' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_ae.py' + outputting to 'script/run_ae.py' +-------------------------------------------------------------------------------- + +24:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +24:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +44:15: INFO: Renamed 'tf.train.Saver' to 'tf.compat.v1.train.Saver' +47:13: INFO: Renamed 'tf.ConfigProto' to 'tf.compat.v1.ConfigProto' +49:9: INFO: Renamed 'tf.Session' to 'tf.compat.v1.Session' +54:6: INFO: tf.summary.FileWriter requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. 
Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +54:6: INFO: Renamed 'tf.summary.FileWriter' to 'tf.compat.v1.summary.FileWriter' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_cls.py' + outputting to 'script/run_cls.py' +-------------------------------------------------------------------------------- + +24:13: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +24:13: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_cls_cmd.py' + outputting to 'script/run_cls_cmd.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_cls_finetune.py' + outputting to 'script/run_cls_finetune.py' +-------------------------------------------------------------------------------- + +54:15: INFO: Renamed 'tf.train.Saver' to 'tf.compat.v1.train.Saver' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_completion.py' + outputting to 'script/run_completion.py' +-------------------------------------------------------------------------------- + 
+55:9: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +55:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +61:20: INFO: Renamed 'tf.py_func' to 'tf.compat.v1.py_func' +73:12: WARNING: Changing dataset.make_one_shot_iterator() to tf.compat.v1.data.make_one_shot_iterator(dataset). Please check this transformation. + +90:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +90:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +113:15: INFO: Renamed 'tf.train.Saver' to 'tf.compat.v1.train.Saver' +116:13: INFO: Renamed 'tf.ConfigProto' to 'tf.compat.v1.ConfigProto' +118:9: INFO: Renamed 'tf.Session' to 'tf.compat.v1.Session' +124:6: INFO: tf.summary.FileWriter requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +124:6: INFO: Renamed 'tf.summary.FileWriter' to 'tf.compat.v1.summary.FileWriter' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_linear_cls.py' + outputting to 'script/run_linear_cls.py' +-------------------------------------------------------------------------------- + +20:9: INFO: `name` passed to `name_scope`. 
Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +20:9: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +22:22: INFO: Renamed 'tf.placeholder' to 'tf.compat.v1.placeholder' +23:22: INFO: Renamed 'tf.placeholder' to 'tf.compat.v1.placeholder' +28:18: WARNING: Changing dataset.make_initializable_iterator() to tf.compat.v1.data.make_initializable_iterator(dataset). Please check this transformation. + +46:7: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +61:13: INFO: Renamed 'tf.global_variables_initializer' to 'tf.compat.v1.global_variables_initializer' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_linear_cls_cmd.py' + outputting to 'script/run_linear_cls_cmd.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_mid.py' + outputting to 'script/run_mid.py' +-------------------------------------------------------------------------------- + +19:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +19:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +25:19: INFO: Added keywords to args of function 'tf.shape' +27:15: INFO: Added keywords to args of function 'tf.boolean_mask' +28:20: INFO: Added keywords to args of function 'tf.boolean_mask' +33:7: INFO: `name` passed to `name_scope`. 
Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +33:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_seg_partnet.py' + outputting to 'script/run_seg_partnet.py' +-------------------------------------------------------------------------------- + +16:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +16:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +22:35: INFO: Added keywords to args of function 'tf.shape' +24:12: INFO: Added keywords to args of function 'tf.boolean_mask' +25:12: INFO: Added keywords to args of function 'tf.boolean_mask' +31:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. + +31:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +34:11: INFO: Added keywords to args of function 'tf.boolean_mask' +35:12: INFO: Added keywords to args of function 'tf.boolean_mask' +36:11: INFO: Added keywords to args of function 'tf.argmax' +41:17: INFO: Added keywords to args of function 'tf.reduce_sum' +42:17: INFO: Added keywords to args of function 'tf.reduce_sum' +63:13: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. 
+ +63:13: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_seg_partnet_cmd.py' + outputting to 'script/run_seg_partnet_cmd.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_seg_partnet_finetune.py' + outputting to 'script/run_seg_partnet_finetune.py' +-------------------------------------------------------------------------------- + +58:15: INFO: Renamed 'tf.train.Saver' to 'tf.compat.v1.train.Saver' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_seg_shapenet.py' + outputting to 'script/run_seg_shapenet.py' +-------------------------------------------------------------------------------- + +13:7: INFO: `name` passed to `name_scope`. Because you may be re-entering an existing scope, it is not safe to convert automatically, the v2 name_scope does not support re-entering scopes by name. 
+ +13:7: INFO: Renamed 'tf.name_scope' to 'tf.compat.v1.name_scope' +19:35: INFO: Added keywords to args of function 'tf.shape' +21:12: INFO: Added keywords to args of function 'tf.boolean_mask' +22:12: INFO: Added keywords to args of function 'tf.boolean_mask' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_seg_shapenet_cmd.py' + outputting to 'script/run_seg_shapenet_cmd.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/run_seg_shapenet_finetune.py' + outputting to 'script/run_seg_shapenet_finetune.py' +-------------------------------------------------------------------------------- + +54:15: INFO: Renamed 'tf.train.Saver' to 'tf.compat.v1.train.Saver' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'script_14/tfsolver.py' + outputting to 'script/tfsolver.py' +-------------------------------------------------------------------------------- + +70:13: INFO: Renamed 'tf.global_variables_initializer' to 'tf.compat.v1.global_variables_initializer' +97:20: INFO: Renamed 'tf.train.Saver' to 'tf.compat.v1.train.Saver' +106:13: INFO: Renamed 'tf.ConfigProto' to 'tf.compat.v1.ConfigProto' +109:9: INFO: Renamed 'tf.Session' to 'tf.compat.v1.Session' +110:23: INFO: tf.summary.FileWriter requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. 
Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +110:23: INFO: Renamed 'tf.summary.FileWriter' to 'tf.compat.v1.summary.FileWriter' +136:10: WARNING: *.save requires manual check. (This warning is only applicable if the code saves a tf.Keras model) Keras model.save now saves to the Tensorflow SavedModel format by default, instead of HDF5. To continue saving to HDF5, add the argument save_format='h5' to the save() function. +145:13: INFO: Renamed 'tf.ConfigProto' to 'tf.compat.v1.ConfigProto' +147:14: INFO: Renamed 'tf.RunOptions' to 'tf.compat.v1.RunOptions' +147:40: INFO: Renamed 'tf.RunOptions' to 'tf.compat.v1.RunOptions' +148:19: INFO: Renamed 'tf.RunMetadata' to 'tf.compat.v1.RunMetadata' +150:9: INFO: Renamed 'tf.Session' to 'tf.compat.v1.Session' +151:23: INFO: tf.summary.FileWriter requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +151:23: INFO: Renamed 'tf.summary.FileWriter' to 'tf.compat.v1.summary.FileWriter' +177:17: INFO: Renamed 'tf.trainable_variables' to 'tf.compat.v1.trainable_variables' +195:15: INFO: Renamed 'tf.train.Saver' to 'tf.compat.v1.train.Saver' +200:13: INFO: Renamed 'tf.ConfigProto' to 'tf.compat.v1.ConfigProto' +202:9: INFO: Renamed 'tf.Session' to 'tf.compat.v1.Session' +203:23: INFO: tf.summary.FileWriter requires manual check. The TF 1.x summary API cannot be automatically migrated to TF 2.0, so symbols have been converted to tf.compat.v1.summary.* and must be migrated manually. 
Typical usage will only require changes to the summary writing logic, not to individual calls like scalar(). For examples of the new summary API, see the Effective TF 2.0 migration document or check the TF 2.0 TensorBoard tutorials. +203:23: INFO: Renamed 'tf.summary.FileWriter' to 'tf.compat.v1.summary.FileWriter' +216:31: INFO: Renamed 'tf.python_io.tf_record_iterator' to 'tf.compat.v1.python_io.tf_record_iterator' +-------------------------------------------------------------------------------- + diff --git a/tensorflow/test/test_all.py b/tensorflow/test/test_all.py index e23c68d9..adc04811 100644 --- a/tensorflow/test/test_all.py +++ b/tensorflow/test/test_all.py @@ -2,19 +2,21 @@ import tensorflow as tf tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) -from test_octree2col import Octree2ColTest -from test_octree_conv import OctreeConvTest +#from test_octree2col import Octree2ColTest +#from test_octree_conv import OctreeConvTest from test_octree_deconv import OctreeDeconvTest -from test_octree_property import OctreePropertyTest -from test_octree_search import OctreeSearchTest -from test_octree_linear import OctreeLinearTest -from test_octree_nearest import OctreeNearestTest -from test_octree_gather import OctreeGatherTest -from test_octree_align import OctreeAlignTest -from test_octree2points import Octree2PointsTest -from test_points_property import PointsPropertyTest +#from test_octree_property import OctreePropertyTest +#from test_octree_search import OctreeSearchTest +#from test_octree_linear import OctreeLinearTest +#from test_octree_nearest import OctreeNearestTest +#from test_octree_gather import OctreeGatherTest +#from test_octree_align import OctreeAlignTest +#from test_octree2points import Octree2PointsTest +#from test_points_property import PointsPropertyTest if __name__ == "__main__": os.environ['CUDA_VISIBLE_DEVICES'] = '0' - tf.test.main() \ No newline at end of file + with tf.Graph().as_default(): + tf.test.main() + aa=1 \ No 
newline at end of file diff --git a/tensorflow/test/test_octree2col.py b/tensorflow/test/test_octree2col.py index 76376f2b..cb18114d 100644 --- a/tensorflow/test/test_octree2col.py +++ b/tensorflow/test/test_octree2col.py @@ -84,7 +84,7 @@ def test_backward(self): out_nn = data_out.eval() shape_in = self.data_in.shape shape_out = out_nn.shape - grad_nn, grad_nm = tf.test.compute_gradient(data_in, shape_in, + grad_nn, grad_nm = tf.compat.v1.test.compute_gradient(data_in, shape_in, data_out, shape_out, delta=0.1) self.assertAllClose(grad_nn, grad_nm, msg='backward: i=%d, j=%d' % (i, j)) diff --git a/tensorflow/test/test_octree_align.py b/tensorflow/test/test_octree_align.py index 24a84900..707efb62 100644 --- a/tensorflow/test/test_octree_align.py +++ b/tensorflow/test/test_octree_align.py @@ -14,7 +14,7 @@ def test_forward_backward1(self): data_out, idx = octree_align(data_in, octree, octree, depth=5) idx_gt = tf.range(16, dtype=tf.int32) - grad = tf.gradients(data_out, data_in)[0] + grad = tf.gradients(ys=data_out, xs=data_in)[0] grad_gt = np.ones([1, 3, 16, 1]) with self.cached_session() as sess: @@ -31,7 +31,7 @@ def test_forward_backward2(self): data_gt = tf.concat([data_in, np.zeros([1, 3, 8, 1], np.float32)], axis=2) idx_gt = tf.range(8, dtype=tf.int32) - grad = tf.gradients(data_out, data_in)[0] + grad = tf.gradients(ys=data_out, xs=data_in)[0] grad_gt = np.ones([1, 3, 8, 1]) with self.cached_session() as sess: @@ -48,7 +48,7 @@ def test_forward_backward3(self): data_gt = data_in[:, :, :8, :] idx_gt = list(range(8)) + [-1] * 8 - grad = tf.gradients(data_out, data_in)[0] + grad = tf.gradients(ys=data_out, xs=data_in)[0] grad_gt = tf.concat([np.ones([1, 3, 8, 1]), np.zeros([1, 3, 8, 1])], axis=2) with self.cached_session() as sess: diff --git a/tensorflow/test/test_octree_conv.py b/tensorflow/test/test_octree_conv.py index 1dc2b9a1..f9b83440 100644 --- a/tensorflow/test/test_octree_conv.py +++ b/tensorflow/test/test_octree_conv.py @@ -19,24 +19,24 @@ def 
forward_and_backward(self, kernel_size, stride, idx=0): data = tf.constant(np.random.uniform(-1.0, 1.0, [1, channel, height, 1]).astype('float32')) # forward - with tf.variable_scope('conv_%d' % idx) as scope: + with tf.compat.v1.variable_scope('conv_%d' % idx) as scope: conv_fast = octree_conv_fast(data, octree, depth, num_outputs, kernel_size, stride) scope.reuse_variables() conv_mem = octree_conv_memory(data, octree, depth, num_outputs, kernel_size, stride) # get kernel - t_vars = tf.trainable_variables() + t_vars = tf.compat.v1.trainable_variables() for var in t_vars: if ('conv_%d' % idx) in var.name: kernel = var # backward - grad_fast, kernel_fast = tf.gradients(conv_fast, [data, kernel]) - grad_mem, kernel_mem = tf.gradients(conv_mem, [data, kernel]) + grad_fast, kernel_fast = tf.gradients(ys=conv_fast, xs=[data, kernel]) + grad_mem, kernel_mem = tf.gradients(ys=conv_mem, xs=[data, kernel]) # test with self.cached_session() as sess: - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) # print('stride: ', stride, ', kernel_size: ', kernel_size) self.assertAllEqual(conv_fast, conv_mem) @@ -56,4 +56,5 @@ def test_forward_and_backward(self): if __name__ == "__main__": os.environ['CUDA_VISIBLE_DEVICES'] = '0' - tf.test.main() \ No newline at end of file + with tf.Graph().as_default(): + tf.test.main() \ No newline at end of file diff --git a/tensorflow/test/test_octree_deconv.py b/tensorflow/test/test_octree_deconv.py index 06fa6dba..68318b6c 100644 --- a/tensorflow/test/test_octree_deconv.py +++ b/tensorflow/test/test_octree_deconv.py @@ -24,20 +24,20 @@ def test_forward_and_backward_2x2(self): deconv_fast = octree_deconv_fast(data, octree, depth, num_outputs, kernel_size, stride) # reference - kernel = tf.trainable_variables()[0] + kernel = tf.compat.v1.trainable_variables()[0] kernel_deconv = tf.reshape(kernel, [channel, num_outputs, 1, -1]) - kernel_deconv = tf.transpose(kernel_deconv, [3, 2, 1, 0]) + 
kernel_deconv = tf.transpose(a=kernel_deconv, perm=[3, 2, 1, 0]) depad = octree_depad(data, octree, depth) deconv_gt = tf.nn.conv2d_transpose(depad, kernel_deconv, strides=[1, 1, 8, 1], output_shape=[1, num_outputs, 320, 1], data_format='NCHW') # backward - grad_fast, kernel_fast = tf.gradients(deconv_fast, [data, kernel]) - grad_gt, kernel_gt = tf.gradients(deconv_gt, [data, kernel]) + grad_fast, kernel_fast = tf.gradients(ys=deconv_fast, xs=[data, kernel]) + grad_gt, kernel_gt = tf.gradients(ys=deconv_gt, xs=[data, kernel]) # test with self.cached_session() as sess: - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) self.assertAllClose(deconv_fast, deconv_gt) self.assertAllClose(grad_fast, grad_gt) @@ -53,24 +53,24 @@ def forward_and_backward(self, kernel_size, stride, idx=0): data = tf.constant(np.random.uniform(-1.0, 1.0, [1, channel, height, 1]).astype('float32')) # forward - with tf.variable_scope('deconv_%d' % idx) as scope: + with tf.compat.v1.variable_scope('deconv_%d' % idx) as scope: conv_fast = octree_deconv_fast(data, octree, depth, num_outputs, kernel_size, stride) scope.reuse_variables() conv_mem = octree_deconv_memory(data, octree, depth, num_outputs, kernel_size, stride) # get kernel - t_vars = tf.trainable_variables() + t_vars = tf.compat.v1.trainable_variables() for var in t_vars: if ('deconv_%d' % idx) in var.name: kernel = var # backward - grad_fast, kernel_fast = tf.gradients(conv_fast, [data, kernel]) - grad_mem, kernel_mem = tf.gradients(conv_mem, [data, kernel]) + grad_fast, kernel_fast = tf.gradients(ys=conv_fast, xs=[data, kernel]) + grad_mem, kernel_mem = tf.gradients(ys=conv_mem, xs=[data, kernel]) # test with self.cached_session() as sess: - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) # print('stride: ', stride, ', kernel_size: ', kernel_size) self.assertAllClose(conv_fast, conv_mem) @@ -94,4 +94,5 @@ def 
test_forward_and_backward(self): if __name__ == "__main__": os.environ['CUDA_VISIBLE_DEVICES'] = '0' - tf.test.main() \ No newline at end of file + with tf.Graph().as_default(): + tf.test.main() \ No newline at end of file diff --git a/tensorflow/test/test_octree_gather.py b/tensorflow/test/test_octree_gather.py index 875c881f..19f39279 100644 --- a/tensorflow/test/test_octree_gather.py +++ b/tensorflow/test/test_octree_gather.py @@ -14,8 +14,8 @@ def test_forward_backward(self): out1 = tf.gather(data, index, axis=2) out2 = octree_gather(data, index) - grad1 = tf.gradients(out1, data) - grad2 = tf.gradients(out2, data) + grad1 = tf.gradients(ys=out1, xs=data) + grad2 = tf.gradients(ys=out2, xs=data) with self.cached_session() as sess: d, o1, o2, g1, g2 = sess.run([data, out1, out2, grad1, grad2]) diff --git a/tensorflow/test/test_octree_grow.py b/tensorflow/test/test_octree_grow.py index 04a2d7ae..fbdb4686 100644 --- a/tensorflow/test/test_octree_grow.py +++ b/tensorflow/test/test_octree_grow.py @@ -14,7 +14,7 @@ octree = octree_grow(octree, target_depth=1, full_octree=True) octree = octree_grow(octree, target_depth=2, full_octree=True) -octree_gt = tf.decode_raw(octree_samples('octree_2'), out_type=tf.int8) +octree_gt = tf.io.decode_raw(octree_samples('octree_2'), out_type=tf.int8) for d in range(2, depth + 1): child = octree_child(octree_gt, depth=d) label = tf.cast(child > -1, tf.int32) diff --git a/tensorflow/test/test_octree_key.py b/tensorflow/test/test_octree_key.py index 57eedd24..01ff2dff 100644 --- a/tensorflow/test/test_octree_key.py +++ b/tensorflow/test/test_octree_key.py @@ -5,7 +5,7 @@ sys.path.append("..") from libs import * -tf.enable_eager_execution() +tf.compat.v1.enable_eager_execution() class OctreeKeyTest(tf.test.TestCase): diff --git a/tensorflow/test/test_octree_linear.py b/tensorflow/test/test_octree_linear.py index 81631e5e..a7aec6b0 100644 --- a/tensorflow/test/test_octree_linear.py +++ b/tensorflow/test/test_octree_linear.py @@ -36,7 
+36,7 @@ def test_forward_backward(self): # backward bilinear_shape = b2.shape - grad_nn, grad_nm = tf.test.compute_gradient( + grad_nn, grad_nm = tf.compat.v1.test.compute_gradient( data, data_shape, bilinear2, bilinear_shape, delta=0.1) self.assertAllClose(grad_nn, grad_nm) diff --git a/tensorflow/test/test_octree_nearest.py b/tensorflow/test/test_octree_nearest.py index efdcbf99..43322a4d 100644 --- a/tensorflow/test/test_octree_nearest.py +++ b/tensorflow/test/test_octree_nearest.py @@ -29,7 +29,7 @@ def test_forward_backward(self): self.assertAllClose(b1, b2) # backward - grad_nn, grad_nm = tf.test.compute_gradient( + grad_nn, grad_nm = tf.compat.v1.test.compute_gradient( data, data_shape, nearest2, b2.shape, delta=0.1) self.assertAllClose(grad_nn, grad_nm) diff --git a/tensorflow/test/test_transform_points.py b/tensorflow/test/test_transform_points.py index d7999ae3..d0cf20be 100644 --- a/tensorflow/test/test_transform_points.py +++ b/tensorflow/test/test_transform_points.py @@ -5,7 +5,7 @@ sys.path.append('..') from libs import * -tf.enable_eager_execution() +tf.compat.v1.enable_eager_execution() filename = 'scene0000_00_000.points' diff --git a/tensorflow/test_report.txt b/tensorflow/test_report.txt new file mode 100644 index 00000000..fc560db5 --- /dev/null +++ b/tensorflow/test_report.txt @@ -0,0 +1,148 @@ +TensorFlow 2.0 Upgrade Script +----------------------------- +Converted 15 files +Detected 0 issues that require attention +-------------------------------------------------------------------------------- +================================================================================ +Detailed log follows: + +================================================================================ +================================================================================ +Input tree: 'test_14/' +================================================================================ 
+-------------------------------------------------------------------------------- +Processing file 'test_14/test_all.py' + outputting to 'test/test_all.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree2col.py' + outputting to 'test/test_octree2col.py' +-------------------------------------------------------------------------------- + +87:29: INFO: Renamed 'tf.test.compute_gradient' to 'tf.compat.v1.test.compute_gradient' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree2points.py' + outputting to 'test/test_octree2points.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_align.py' + outputting to 'test/test_octree_align.py' +-------------------------------------------------------------------------------- + +17:11: INFO: Added keywords to args of function 'tf.gradients' +34:11: INFO: Added keywords to args of function 'tf.gradients' +51:11: INFO: Added keywords to args of function 'tf.gradients' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_conv.py' + outputting to 'test/test_octree_conv.py' +-------------------------------------------------------------------------------- + +22:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +28:13: INFO: Renamed 
'tf.trainable_variables' to 'tf.compat.v1.trainable_variables' +34:29: INFO: Added keywords to args of function 'tf.gradients' +35:29: INFO: Added keywords to args of function 'tf.gradients' +39:15: INFO: Renamed 'tf.global_variables_initializer' to 'tf.compat.v1.global_variables_initializer' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_deconv.py' + outputting to 'test/test_octree_deconv.py' +-------------------------------------------------------------------------------- + +27:13: INFO: Renamed 'tf.trainable_variables' to 'tf.compat.v1.trainable_variables' +29:20: INFO: Added keywords to args of function 'tf.transpose' +35:29: INFO: Added keywords to args of function 'tf.gradients' +36:29: INFO: Added keywords to args of function 'tf.gradients' +40:15: INFO: Renamed 'tf.global_variables_initializer' to 'tf.compat.v1.global_variables_initializer' +56:9: INFO: Renamed 'tf.variable_scope' to 'tf.compat.v1.variable_scope' +62:13: INFO: Renamed 'tf.trainable_variables' to 'tf.compat.v1.trainable_variables' +68:29: INFO: Added keywords to args of function 'tf.gradients' +69:29: INFO: Added keywords to args of function 'tf.gradients' +73:15: INFO: Renamed 'tf.global_variables_initializer' to 'tf.compat.v1.global_variables_initializer' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_gather.py' + outputting to 'test/test_octree_gather.py' +-------------------------------------------------------------------------------- + +17:12: INFO: Added keywords to args of function 'tf.gradients' +18:12: INFO: Added keywords to args of function 'tf.gradients' +-------------------------------------------------------------------------------- + 
+-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_grow.py' + outputting to 'test/test_octree_grow.py' +-------------------------------------------------------------------------------- + +17:12: INFO: Renamed 'tf.decode_raw' to 'tf.io.decode_raw' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_key.py' + outputting to 'test/test_octree_key.py' +-------------------------------------------------------------------------------- + +8:0: INFO: Renamed 'tf.enable_eager_execution' to 'tf.compat.v1.enable_eager_execution' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_linear.py' + outputting to 'test/test_octree_linear.py' +-------------------------------------------------------------------------------- + +39:25: INFO: Renamed 'tf.test.compute_gradient' to 'tf.compat.v1.test.compute_gradient' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_nearest.py' + outputting to 'test/test_octree_nearest.py' +-------------------------------------------------------------------------------- + +32:25: INFO: Renamed 'tf.test.compute_gradient' to 'tf.compat.v1.test.compute_gradient' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_property.py' + outputting to 'test/test_octree_property.py' +-------------------------------------------------------------------------------- + + 
+-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_octree_search.py' + outputting to 'test/test_octree_search.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_points_property.py' + outputting to 'test/test_points_property.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'test_14/test_transform_points.py' + outputting to 'test/test_transform_points.py' +-------------------------------------------------------------------------------- + +8:0: INFO: Renamed 'tf.enable_eager_execution' to 'tf.compat.v1.enable_eager_execution' +-------------------------------------------------------------------------------- + diff --git a/tensorflow/util/convert_tfrecords.py b/tensorflow/util/convert_tfrecords.py index d06db40f..92d1d069 100644 --- a/tensorflow/util/convert_tfrecords.py +++ b/tensorflow/util/convert_tfrecords.py @@ -1,4 +1,5 @@ import os +import sys import argparse import tensorflow as tf from random import shuffle @@ -34,7 +35,7 @@ def load_octree(file): def write_data_to_tfrecords(file_dir, list_file, records_name, file_type): [data, label, index] = get_data_label_pair(list_file) - writer = tf.python_io.TFRecordWriter(records_name) + writer = tf.io.TFRecordWriter(records_name) for i in range(len(data)): if not i % 1000: print('data loaded: {}/{}'.format(i, len(data))) @@ -48,6 +49,39 @@ def write_data_to_tfrecords(file_dir, list_file, records_name, file_type): 
writer.write(example.SerializeToString()) writer.close() +def write_data_to_tfrecords2octree(file_dir, list_file, records_name, file_type): + [data, label, index] = get_data_label_pair(list_file) + + writer = tf.io.TFRecordWriter(records_name) + xDirectory=file_dir.replace('octree','xoctree') + + xMapper={} + xfilenames = sorted(os.listdir(xDirectory)) + for xfilename in xfilenames: + xMapper[xfilename[2:11]] =xfilename + + for i in range(len(data)): + if not i % 1000: + print('data loaded: {}/{}'.format(i, len(data))) + + fname=data[i].split('/')[-1] + + if fname[1:10] not in xMapper: + continue + #P032040000_055356_36_RL_75.octree + octree_file1 = load_octree(os.path.join(file_dir, fname)) + #CM032040005_054132_63_RL_75.octree + octree_file2 = load_octree(os.path.join(xDirectory,xMapper[fname[1:10]])) + + feature = {'data1': _bytes_feature(octree_file1), + 'data2': _bytes_feature(octree_file2), + 'label': _int64_feature(label[i]), + 'index': _int64_feature(index[i]), + 'filename': _bytes_feature(('%06d_%s' % (i, data[i])).encode('utf8'))} + example = tf.train.Example(features=tf.train.Features(feature=feature)) + writer.write(example.SerializeToString()) + writer.close() + def get_data_label_pair(list_file): file_list = [] @@ -71,9 +105,16 @@ def get_data_label_pair(list_file): if __name__ == '__main__': + print(sys.argv) args = parser.parse_args() shuffle_data = args.shuffle - write_data_to_tfrecords(args.file_dir, + if args.file_type=="2data" : + write_data_to_tfrecords2octree(args.file_dir, + args.list_file, + args.records_name, + args.file_type) + else: + write_data_to_tfrecords(args.file_dir, args.list_file, args.records_name, args.file_type) diff --git a/tensorflow/util/octree_samples.py b/tensorflow/util/octree_samples.py index 3a92b10b..726cc9da 100644 --- a/tensorflow/util/octree_samples.py +++ b/tensorflow/util/octree_samples.py @@ -4,7 +4,7 @@ sys.path.append("..") from libs import octree_samples -tf.enable_eager_execution() 
+tf.compat.v1.enable_eager_execution() # dump the octree samples for i in range(1, 7): diff --git a/tensorflow/util/parse_tfevents.py b/tensorflow/util/parse_tfevents.py index 2742e42b..07ed0b98 100644 --- a/tensorflow/util/parse_tfevents.py +++ b/tensorflow/util/parse_tfevents.py @@ -14,7 +14,7 @@ args = parser.parse_args() -for e in tf.train.summary_iterator(args.event): +for e in tf.compat.v1.train.summary_iterator(args.event): has_value = False msg = '{}'.format(e.step) for v in e.summary.value: diff --git a/tensorflow/util/revert_tfrecords.py b/tensorflow/util/revert_tfrecords.py index f164c7b1..69bd9de0 100644 --- a/tensorflow/util/revert_tfrecords.py +++ b/tensorflow/util/revert_tfrecords.py @@ -16,7 +16,7 @@ def read_data_from_tfrecords(records_name, output_path, list_file, file_type, count): - records_iterator = tf.python_io.tf_record_iterator(records_name) + records_iterator = tf.compat.v1.python_io.tf_record_iterator(records_name) count = count if count != 0 else float('Inf') with open(os.path.join(output_path, list_file), "w") as f: diff --git a/tensorflow/util_report.txt b/tensorflow/util_report.txt new file mode 100644 index 00000000..6f058aa2 --- /dev/null +++ b/tensorflow/util_report.txt @@ -0,0 +1,53 @@ +TensorFlow 2.0 Upgrade Script +----------------------------- +Converted 5 files +Detected 0 issues that require attention +-------------------------------------------------------------------------------- +================================================================================ +Detailed log follows: + +================================================================================ +================================================================================ +Input tree: 'util_14/' +================================================================================ +-------------------------------------------------------------------------------- +Processing file 'util_14/convert_tfrecords.py' + outputting to 'util/convert_tfrecords.py' 
+-------------------------------------------------------------------------------- + +38:11: INFO: Renamed 'tf.python_io.TFRecordWriter' to 'tf.io.TFRecordWriter' +55:11: INFO: Renamed 'tf.python_io.TFRecordWriter' to 'tf.io.TFRecordWriter' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'util_14/octree_samples.py' + outputting to 'util/octree_samples.py' +-------------------------------------------------------------------------------- + +7:0: INFO: Renamed 'tf.enable_eager_execution' to 'tf.compat.v1.enable_eager_execution' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'util_14/parse_ckpt.py' + outputting to 'util/parse_ckpt.py' +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'util_14/parse_tfevents.py' + outputting to 'util/parse_tfevents.py' +-------------------------------------------------------------------------------- + +17:9: INFO: Renamed 'tf.train.summary_iterator' to 'tf.compat.v1.train.summary_iterator' +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +Processing file 'util_14/revert_tfrecords.py' + outputting to 'util/revert_tfrecords.py' +-------------------------------------------------------------------------------- + +19:21: INFO: Renamed 'tf.python_io.tf_record_iterator' to 'tf.compat.v1.python_io.tf_record_iterator' +-------------------------------------------------------------------------------- +