diff --git a/docker/Dockerfile b/docker/Dockerfile index ef8084a..9979730 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,10 +1,10 @@ -# Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual # property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:21.08-py3 @@ -49,3 +49,5 @@ RUN pip install meshzoo ipdb imageio gputil h5py point-cloud-utils imageio image # HDR image support RUN imageio_download_bin freeimage + +RUN apt-get install -y libxi6 libgconf-2-4 libfontconfig1 libxrender1 diff --git a/docker/make_image.sh b/docker/make_image.sh old mode 100644 new mode 100755 diff --git a/render_shapenet_data/.gitignore b/render_shapenet_data/.gitignore new file mode 100644 index 0000000..6d320f1 --- /dev/null +++ b/render_shapenet_data/.gitignore @@ -0,0 +1,4 @@ +*.blend1 +shapenet_rendered +shapenet +tmp.out \ No newline at end of file diff --git a/render_shapenet_data/README.md b/render_shapenet_data/README.md index 9454533..40467dc 100644 --- a/render_shapenet_data/README.md +++ b/render_shapenet_data/README.md @@ -1,28 +1,98 @@ -## Render Shapenet Dataset +# Render ShapeNet Dataset +This script will help you render ShapeNet and custom datasets that follow ShapeNet conventions. 
-- Download shapenet V1 dataset following the [official link](https://shapenet.org/) and - unzip the downloaded file `unzip SHAPENET_SYNSET_ID.zip`. -- Download Blender following the [official link](https://www.blender.org/), we used - Blender **v2.90.0**, we haven't tested on other versions. -- Install required libraries: +## Prerequisites +- Python 3.7 or higher +- Blender 2.9 or higher +- OSX, Linux or Windows running WSL2 -```bash +## Setup +- Download the ShapeNet V1 or V2 dataset following the [official link](https://shapenet.org/) +- Make a new folder called shapenet in this directory, and unzip the downloaded file: `mkdir shapenet && unzip SHAPENET_SYNSET_ID.zip -d shapenet` +- Download Blender following the [official link](https://www.blender.org/) + +## Installing Required Libraries +You will need the following libraries on Linux: +``` apt-get install -y libxi6 libgconf-2-4 libfontconfig1 libxrender1 +``` + +Blender ships with its own distribution of Python, which you will need to add some libraries to: +```bash cd BLENDER_PATH/2.90/python/bin ./python3.7m -m ensurepip -./python3.7m -m pip install numpy +./python3.7m -m pip install numpy ``` -- Running the render script: +## Data +The rendering script looks for datasets in the dataset_list.json file. You can modify this to add your own files and paths or point to your own JSON dataset list using the `--dataset_list` flag when invoking `render_all.py` + +- For **ShapeNetCore.v1**, you don't need to do any preprocessing. If you are using your own dataset, you should make sure that your models are sorted into directories with a "model.obj" in them, following the expected conventions of ShapeNetCore.v1. 
+ +- For **ShapeNetCore.v2**, make sure to pass the `--shapenet_version 2` flag to the `render_all.py` script -- this will destructively normalize your dataset folder to match the expected structure of ShapeNetCore.v1, while retaining the original .obj and .mtl file names +## Rendering + +### Quick Start +Once you've modified dataset_list.json or added the ShapeNet data that reflects the source training data paths, you can render your data like this: ```bash -python render_all.py --save_folder PATH_TO_SAVE_IMAGE --dataset_folder PATH_TO_3D_OBJ --blender_root PATH_TO_BLENDER +python render_all.py +``` + +**Note:** The code will save the output from blender to `tmp.out`, this is not necessary for training, and can be removed by `rm -rf tmp.out` + +## Additional Flags +You can customize the rendering script by adding flags. + +### Switch to Eevee for dramatically faster rendering speed +By default, the Blender renderer uses Cycles, which has a photorealistic look but is slow (>10s/frame). You can also use Eevee, which may be more game-like in look but renders much much faster (<.3s/frame), and may be suitable for extracting a high quality dataset on lower end machines in a reasonable amount of time. +``` +python render_all.py --engine EEVEE +``` + +### Render ShapeNet V2 +For ShapeNetCore.v2 you will need to pass a flag to the render script to pre-process your data: +```bash +python render_all.py --shapenet_version 2 +``` + +### Log to Console +By default, the script will log to a tmp.out file (quiet mode), but you can override this: +``` +python render_all.py --quiet_mode 0 + +``` +### Set Number of Views to Capture +The default for the rendering script is to capture 24 views per object. However, many NeRF pipelines recommend closer to 100 images. 
Especially if you are working with a limited but high quality dataset, you should consider increasing the total number of views 2-4x +``` +python render_all.py --num_views 96 +``` + +### Override Arguments +By default, the rendering script will save outputs to "shapenet_rendered", read all datasets from dataset_list.json and use the default Blender installation in your system. However, you can override these arguments: +```bash +python render_all.py --save_folder PATH_TO_SAVE_IMAGE --dataset_list PATH_TO_DATASET_JSON --blender_root PATH_TO_BLENDER +``` + +## Modifying the Render Scene +You can open the base scenes (located in the blender directory) and modify the lighting. There are no objects in the scene, so you will need to import a test object. Just be careful to remove any scene objects before you save. + +## Comparison Between Cycles and Eevee +Cycles is on the **Left**, Eevee is on the **Right** +
+drawing drawing + +To Render Eevee headlessly +``` +!apt-get install python-opengl -y +!apt install xvfb -y +!pip install pyvirtualdisplay +!pip install piglet +python3 render_parallel.py --num_views 96 --engine EEVEE --headless ``` -- (Optional) The code will save the output from blender to `tmp.out`, this is not - necessary for training, and can be removed by `rm -rf tmp.out` +## Attribution +- This code is adopted from this [GitHub repo](https://github.com/panmari/stanford-shapenet-renderer), we thank the author for sharing the codes! -- This code is adopted from - this [GitHub repo](https://github.com/panmari/stanford-shapenet-renderer), we thank the - author for sharing the codes! \ No newline at end of file +- The tome in the rendering comparison images was borrowed with permission from the [Loot Assets](https://github.com/webaverse/loot-assets) library. \ No newline at end of file diff --git a/render_shapenet_data/blender/cycles_renderer.blend b/render_shapenet_data/blender/cycles_renderer.blend new file mode 100644 index 0000000..92b5836 Binary files /dev/null and b/render_shapenet_data/blender/cycles_renderer.blend differ diff --git a/render_shapenet_data/blender/eevee_renderer.blend b/render_shapenet_data/blender/eevee_renderer.blend new file mode 100644 index 0000000..9b78669 Binary files /dev/null and b/render_shapenet_data/blender/eevee_renderer.blend differ diff --git a/render_shapenet_data/dataset_list.json b/render_shapenet_data/dataset_list.json new file mode 100644 index 0000000..7bd133a --- /dev/null +++ b/render_shapenet_data/dataset_list.json @@ -0,0 +1,20 @@ +[ + { + "name": "Car", + "id": "02958343", + "scale": 0.9, + "directory": "./shapenet/02958343" + }, + { + "name": "Chair", + "id": "03001627", + "scale": 0.7, + "directory": "./shapenet/03001627" + }, + { + "name": "Motorbike", + "id": "03790512", + "scale": 0.9, + "directory": "./shapenet/03790512" + } +] diff --git a/render_shapenet_data/docs_img/cycles.png 
b/render_shapenet_data/docs_img/cycles.png new file mode 100644 index 0000000..e72a6e8 Binary files /dev/null and b/render_shapenet_data/docs_img/cycles.png differ diff --git a/render_shapenet_data/docs_img/eevee.png b/render_shapenet_data/docs_img/eevee.png new file mode 100644 index 0000000..c52190d Binary files /dev/null and b/render_shapenet_data/docs_img/eevee.png differ diff --git a/render_shapenet_data/render_all.py b/render_shapenet_data/render_all.py index 5d23be7..642857f 100644 --- a/render_shapenet_data/render_all.py +++ b/render_shapenet_data/render_all.py @@ -8,37 +8,145 @@ import os import argparse +import json +import subprocess +from multiprocessing.pool import ThreadPool +import subprocess + +# Connect EFS +# /home/user/mirage-dev/GET3D/render_shapenet_data/mirageml-dev/aman/experiements/GET3D/render_shapenet_data +# sudo sshfs ubuntu@ec2-3-95-21-26.compute-1.amazonaws.com:/home/ubuntu/mirage-dev/ /home/user/mirage-dev/GET3D/render_shapenet_data/mirageml-dev -o IdentityFile=/home/user/mirage-dev/GET3D/render_shapenet_data/mirage-omniverse.pem -o allow_other parser = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.') parser.add_argument( - '--save_folder', type=str, default='./tmp', + '--save_folder', type=str, default='./shapenet_rendered', help='path for saving rendered image') parser.add_argument( - '--dataset_folder', type=str, default='./tmp', - help='path for downloaded 3d dataset folder') + '--dataset_list', type=str, default='./dataset_list.json', + help='path to a json linking datasets') parser.add_argument( - '--blender_root', type=str, default='./tmp', + '--blender_root', type=str, default='blender', help='path for blender') +parser.add_argument( + '--shapenet_version', type=str, default='1', + help='ShapeNet version 1 or 2') +parser.add_argument( + '--num_views', type=str, default='24', + help='Number of views to capture per object') +parser.add_argument( + '--engine', type=str, 
default='CYCLES', + help='Use CYCLES or EEVEE - CYCLES is a realistic path tracer (slow), EEVEE is a real-time renderer (fast)') +parser.add_argument( + '--quiet_mode', type=bool, default=1, + help='Route output of console to log file') args = parser.parse_args() + +engine = args.engine +quiet_mode = args.quiet_mode save_folder = args.save_folder -dataset_folder = args.dataset_folder +dataset_list = args.dataset_list blender_root = args.blender_root +shapenet_version = args.shapenet_version +num_views = args.num_views -synset_list = [ - '02958343', # Car - '03001627', # Chair - '03790512' # Motorbike -] -scale_list = [ - 0.9, - 0.7, - 0.9 -] -for synset, obj_scale in zip(synset_list, scale_list): - file_list = sorted(os.listdir(os.path.join(dataset_folder, synset))) - for idx, file in enumerate(file_list): - render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 >> tmp.out' % ( - blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale +# check if dataset_list exists, throw error if not +if not os.path.exists(dataset_list): + raise ValueError('dataset_list does not exist!') + +# check if save_folder exists +if not os.path.exists(save_folder): + os.makedirs(save_folder) + +scale_list = [] +path_list = [] + +# read and parse json file at dataset_list.json +with open(dataset_list, 'r') as f: + dataset = json.load(f) + +# example json entry: +# { +# "name": "Car", +# "id": "02958343", +# "scale": 0.9, +# "directory": "./shapenet/02958343" +# } +for entry in dataset: + scale_list.append(entry['scale']) + path_list.append(entry['directory']) + + +# for shapenet v2, we normalize the model location +if shapenet_version == '2': + for obj_scale, dataset_folder in zip(scale_list, path_list): + file_list = sorted(os.listdir(os.path.join(dataset_folder))) + for file in file_list: + # check if file_list+'/models' exists + if os.path.exists(os.path.join(dataset_folder, file, 'models')): + # move all 
files in file_list+'/models' to file_list + os.system('mv ' + os.path.join(dataset_folder, file, 'models/*') + ' ' + os.path.join(dataset_folder, file)) + # remove file_list+'/models' if it exists + os.system('rm -rf ' + os.path.join(dataset_folder, file, 'models')) + material_file = os.path.join(dataset_folder, file, 'model_normalized.mtl') + # read material_file as a text file, replace any instance of '../images' with './images' + with open(material_file, 'r') as f: + material_file_text = f.read() + material_file_text = material_file_text.replace('../images', './images') + # write the modified text to material_file + with open(material_file, 'w') as f: + f.write(material_file_text) + +# ShapeNetCore v2 normalizes the scale and orientation of the models and the names are changed as a result +model_name = 'model.obj' +if shapenet_version == '2': + model_name = 'model_normalized.obj' + +suffix = '' +if(args.quiet_mode == '1'): + suffix = ' >> tmp.out' + +import pdb; +for obj_scale, dataset_folder in zip(scale_list, path_list): + file_list = sorted(os.listdir(os.path.join(dataset_folder))) + num = None # set to the number of workers you want (it defaults to the cpu count of your machine) + tp = ThreadPool(num) + def work(file): + output_dir = "/home/user/mirage-dev/GET3D/render_shapenet_data/mirageml-dev/aman/experiements/GET3D/shapenet_rendered" + camera_dir = os.path.abspath(os.path.join(save_folder, "camera", dataset_folder.split("/")[-1], file)) + camera_save_dir = os.path.join(output_dir, "camera", dataset_folder.split("/")[-1], file) + img_dir = os.path.abspath(os.path.join(save_folder, "img", dataset_folder.split("/")[-1], file)) + img_save_dir = os.path.join(output_dir, "img", dataset_folder.split("/")[-1], file) + + if os.path.exists(camera_save_dir) and os.path.exists(img_save_dir): + print("Files Exist on EFS; ", file) + if os.path.exists(camera_dir) and os.path.exists(img_dir): + print("Removing Local: ",file) + subprocess.call(["rm", "-rf", camera_dir]) 
+ subprocess.call(["rm", "-rf", img_dir]) + return + elif os.path.exists(camera_dir) and os.path.exists(img_dir): + print("Files Exist Locally Moving to EFS: ", file) + subprocess.call(["mv", camera_dir, camera_save_dir]) + subprocess.call(["mv", img_dir, img_save_dir]) + return + + print("Rendering: ", file) + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s' % ( + blender_root, save_folder, os.path.join(dataset_folder, file, model_name), obj_scale, num_views, engine, suffix ) os.system(render_cmd) + + print("Moving:", camera_dir, camera_save_dir) + subprocess.call(["mv", camera_dir, camera_save_dir]) + print("Moving", img_dir, img_save_dir) + subprocess.call(["mv", img_dir, img_save_dir]) + + for idx, file in enumerate(file_list): + tp.apply_async(work, (file,)) + + tp.close() + tp.join() + + + diff --git a/render_shapenet_data/render_parallel.py b/render_shapenet_data/render_parallel.py new file mode 100644 index 0000000..7467063 --- /dev/null +++ b/render_shapenet_data/render_parallel.py @@ -0,0 +1,184 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited. 
+ +import os +import argparse +import json +import time +import subprocess + +parser = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.') +parser.add_argument( + '--save_folder', type=str, default='./shapenet_rendered', + help='path for saving rendered image') +parser.add_argument( + '--dataset_list', type=str, default='./dataset_list.json', + help='path to a json linking datasets') +parser.add_argument( + '--blender_root', type=str, default='blender', + help='path for blender') +parser.add_argument( + '--shapenet_version', type=str, default='1', + help='ShapeNet version 1 or 2') +parser.add_argument( + '--num_views', type=str, default='24', + help='Number of views to capture per object') +parser.add_argument( + '--engine', type=str, default='CYCLES', + help='Use CYCLES or EEVEE - CYCLES is a realistic path tracer (slow), EEVEE is a real-time renderer (fast)') +parser.add_argument( + '--quiet_mode', type=bool, default=1, + help='Route output of console to log file') +parser.add_argument( + '--headless', action='store_true', default=False, help='Run blender in headless mode') +args = parser.parse_args() + +engine = args.engine +quiet_mode = args.quiet_mode +save_folder = args.save_folder +dataset_list = args.dataset_list +blender_root = args.blender_root +shapenet_version = args.shapenet_version +num_views = args.num_views + +if args.headless and args.engine == 'EEVEE': + from pyvirtualdisplay import Display + Display().start() + +# check if dataset_list exists, throw error if not +if not os.path.exists(dataset_list): + raise ValueError('dataset_list does not exist!') + +# check if save_folder exists +if not os.path.exists(save_folder): + os.makedirs(save_folder) + +scale_list = [] +path_list = [] + +# read and parse json file at dataset_list.json +with open(dataset_list, 'r') as f: + dataset = json.load(f) + +# example json entry: +# { +# "name": "Car", +# "id": "02958343", +# "scale": 0.9, +# "directory": 
"./shapenet/02958343" +# } +for entry in dataset: + scale_list.append(entry['scale']) + path_list.append(entry['directory']) + + +# for shapenet v2, we normalize the model location +if shapenet_version == '2': + for obj_scale, dataset_folder in zip(scale_list, path_list): + file_list = sorted(os.listdir(os.path.join(dataset_folder))) + for file in file_list: + # check if file_list+'/models' exists + if os.path.exists(os.path.join(dataset_folder, file, 'models')): + # move all files in file_list+'/models' to file_list + os.system('mv ' + os.path.join(dataset_folder, file, 'models/*') + ' ' + os.path.join(dataset_folder, file)) + # remove file_list+'/models' if it exists + os.system('rm -rf ' + os.path.join(dataset_folder, file, 'models')) + material_file = os.path.join(dataset_folder, file, 'model_normalized.mtl') + # read material_file as a text file, replace any instance of '../images' with './images' + with open(material_file, 'r') as f: + material_file_text = f.read() + material_file_text = material_file_text.replace('../images', './images') + # write the modified text to material_file + with open(material_file, 'w') as f: + f.write(material_file_text) + +# ShapeNetCore v2 normalizes the scale and orientation of the models and the names are changed as a result +model_name = 'model.obj' +if shapenet_version == '2': + model_name = 'model_normalized.obj' + +suffix = '' +if(args.quiet_mode == '1'): + suffix = ' >> tmp.out' + +for obj_scale, dataset_folder in zip(scale_list, path_list): + file_list = sorted(os.listdir(os.path.join(dataset_folder))) + idx = 0 + start_time = time.time() + while idx < len(file_list): + print("Done with %d/%d" % (idx, len(file_list))) + + stdout = open('stdout.txt', 'w') + stderr = open('stderr.txt', 'w') + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s --gpu 0' % ( + blender_root, save_folder, os.path.join(dataset_folder, file, model_name), obj_scale, num_views, 
engine, suffix + ) + p0 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s --gpu 1' % ( + blender_root, save_folder, os.path.join(dataset_folder, file, model_name), obj_scale, num_views, engine, suffix + ) + p1 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s --gpu 2' % ( + blender_root, save_folder, os.path.join(dataset_folder, file, model_name), obj_scale, num_views, engine, suffix + ) + p2 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s --gpu 3' % ( + blender_root, save_folder, os.path.join(dataset_folder, file, model_name), obj_scale, num_views, engine, suffix + ) + p3 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s --gpu 4' % ( + blender_root, save_folder, os.path.join(dataset_folder, file, model_name), obj_scale, num_views, engine, suffix + ) + p4 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s --gpu 5' % ( + blender_root, save_folder, os.path.join(dataset_folder, file, model_name), obj_scale, num_views, engine, suffix + ) + p5 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s --gpu 6' % ( + blender_root, save_folder, 
os.path.join(dataset_folder, file, model_name), obj_scale, num_views, engine, suffix + ) + p6 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views %s --engine %s%s --gpu 7' % ( + blender_root, save_folder, os.path.join(dataset_folder, file, model_name), obj_scale, num_views, engine, suffix + ) + p7 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + p0.wait() + p1.wait() + p2.wait() + p3.wait() + p4.wait() + p5.wait() + p6.wait() + p7.wait() + + end_time = time.time() + print('Time for rendering %d models: %f' % (len(file_list), end_time - start_time)) \ No newline at end of file diff --git a/render_shapenet_data/render_shapenet.py b/render_shapenet_data/render_shapenet.py index fe9f274..8b63806 100644 --- a/render_shapenet_data/render_shapenet.py +++ b/render_shapenet_data/render_shapenet.py @@ -8,9 +8,8 @@ import argparse, sys, os, math, re import bpy -from mathutils import Vector, Matrix +from mathutils import Vector import numpy as np -import json parser = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.') parser.add_argument( @@ -28,39 +27,26 @@ parser.add_argument( '--format', type=str, default='PNG', help='Format of files generated. Either PNG or OPEN_EXR') -parser.add_argument( - '--resolution', type=int, default=512, - help='Resolution of the images.') parser.add_argument( '--engine', type=str, default='CYCLES', - help='Blender internal engine for rendering. E.g. CYCLES, BLENDER_EEVEE, ...') + help='Blender internal engine for rendering. 
either CYCLES or EEVEE, ...') +parser.add_argument( + '--gpu', type=int, default=0, + help='gpu.') argv = sys.argv[sys.argv.index("--") + 1:] args = parser.parse_args(argv) +if args.engine == 'CYCLES': + bpy.ops.wm.open_mainfile(filepath=os.path.abspath("./blender/cycles_renderer.blend")) +else: + bpy.ops.wm.open_mainfile(filepath=os.path.abspath("./blender/eevee_renderer.blend")) + # Set up rendering context = bpy.context scene = bpy.context.scene render = bpy.context.scene.render -render.engine = args.engine -render.image_settings.color_mode = 'RGBA' # ('RGB', 'RGBA', ...) -render.image_settings.file_format = args.format # ('PNG', 'OPEN_EXR', 'JPEG, ...) -render.resolution_x = args.resolution -render.resolution_y = args.resolution -render.resolution_percentage = 100 -bpy.context.scene.cycles.filter_width = 0.01 -bpy.context.scene.render.film_transparent = True - -bpy.context.scene.cycles.device = 'GPU' -bpy.context.scene.cycles.diffuse_bounces = 1 -bpy.context.scene.cycles.glossy_bounces = 1 -bpy.context.scene.cycles.transparent_max_bounces = 3 -bpy.context.scene.cycles.transmission_bounces = 3 -bpy.context.scene.cycles.samples = 32 -bpy.context.scene.cycles.use_denoising = True - - def enable_cuda_devices(): prefs = bpy.context.preferences cprefs = prefs.addons['cycles'].preferences @@ -83,20 +69,13 @@ def enable_cuda_devices(): # If we have CUDA/OPENCL devices, enable only them, otherwise enable # all devices (assumed to be CPU) print(cprefs.devices) - for device in cprefs.devices: - device.use = not accelerated or device.type in acceleratedTypes + for idx, device in enumerate(cprefs.devices): + device.use = (not accelerated or device.type in acceleratedTypes) and idx == args.gpu print('Device enabled ({type}) = {enabled}'.format(type=device.type, enabled=device.use)) return accelerated - enable_cuda_devices() -context.active_object.select_set(True) -bpy.ops.object.delete() - -# Import textured mesh -bpy.ops.object.select_all(action='DESELECT') - def 
bounds(obj, local=False): local_coords = obj.bound_box[:] @@ -125,39 +104,6 @@ def bounds(obj, local=False): o_details = collections.namedtuple('object_details', 'x y z') return o_details(**originals) -# function from https://github.com/panmari/stanford-shapenet-renderer/blob/master/render_blender.py -def get_3x4_RT_matrix_from_blender(cam): - # bcam stands for blender camera - # R_bcam2cv = Matrix( - # ((1, 0, 0), - # (0, 1, 0), - # (0, 0, 1))) - - # Transpose since the rotation is object rotation, - # and we want coordinate rotation - # R_world2bcam = cam.rotation_euler.to_matrix().transposed() - # T_world2bcam = -1*R_world2bcam @ location - # - # Use matrix_world instead to account for all constraints - location, rotation = cam.matrix_world.decompose()[0:2] - R_world2bcam = rotation.to_matrix().transposed() - - # Convert camera location to translation vector used in coordinate changes - # T_world2bcam = -1*R_world2bcam @ cam.location - # Use location from matrix_world to account for constraints: - T_world2bcam = -1*R_world2bcam @ location - - # # Build the coordinate transform matrix from world to computer vision camera - # R_world2cv = R_bcam2cv@R_world2bcam - # T_world2cv = R_bcam2cv@T_world2bcam - - # put into 3x4 matrix - RT = Matrix(( - R_world2bcam[0][:] + (T_world2bcam[0],), - R_world2bcam[1][:] + (T_world2bcam[1],), - R_world2bcam[2][:] + (T_world2bcam[2],) - )) - return RT imported_object = bpy.ops.import_scene.obj(filepath=args.obj, use_edges=False, use_smooth_groups=False, split_mode='OFF') @@ -191,91 +137,36 @@ def get_3x4_RT_matrix_from_blender(cam): mesh_obj.scale[2] /= factor bpy.ops.object.transform_apply(scale=True) -bpy.ops.object.light_add(type='AREA') -light2 = bpy.data.lights['Area'] - -light2.energy = 30000 -bpy.data.objects['Area'].location[2] = 0.5 -bpy.data.objects['Area'].scale[0] = 100 -bpy.data.objects['Area'].scale[1] = 100 -bpy.data.objects['Area'].scale[2] = 100 - -# Place camera +# Get reference to camera and empty (rotation 
pivot) cam = scene.objects['Camera'] -cam.location = (0, 1.2, 0) # radius equals to 1 -cam.data.lens = 35 -cam.data.sensor_width = 32 - -cam_constraint = cam.constraints.new(type='TRACK_TO') -cam_constraint.track_axis = 'TRACK_NEGATIVE_Z' -cam_constraint.up_axis = 'UP_Y' - -cam_empty = bpy.data.objects.new("Empty", None) -cam_empty.location = (0, 0, 0) -cam.parent = cam_empty - -scene.collection.objects.link(cam_empty) -context.view_layer.objects.active = cam_empty -cam_constraint.target = cam_empty +cam_empty = scene.objects['Empty'] stepsize = 360.0 / args.views rotation_mode = 'XYZ' model_identifier = os.path.split(os.path.split(args.obj)[0])[1] +print('model identifier: ' + model_identifier) synset_idx = args.obj.split('/')[-3] +print('synset idx: ' + synset_idx) -img_follder = os.path.join(os.path.abspath(args.output_folder), 'img', synset_idx, model_identifier) -camera_follder = os.path.join(os.path.abspath(args.output_folder), 'camera', synset_idx, model_identifier) +img_folder = os.path.join(os.path.abspath(args.output_folder), 'img', synset_idx, model_identifier) +camera_folder = os.path.join(os.path.abspath(args.output_folder), 'camera', synset_idx, model_identifier) -os.makedirs(img_follder, exist_ok=True) -os.makedirs(camera_follder, exist_ok=True) +os.makedirs(img_folder, exist_ok=True) +os.makedirs(camera_folder, exist_ok=True) rotation_angle_list = np.random.rand(args.views) elevation_angle_list = np.random.rand(args.views) rotation_angle_list = rotation_angle_list * 360 elevation_angle_list = elevation_angle_list * 30 -np.save(os.path.join(camera_follder, 'rotation'), rotation_angle_list) -np.save(os.path.join(camera_follder, 'elevation'), elevation_angle_list) - -# creation of the transform.json -to_export = { - 'camera_angle_x': bpy.data.cameras[0].angle_x, - "aabb": [[-scale/2,-scale/2,-scale/2], - [scale/2,scale/2,scale/2]] -} -frames = [] +np.save(os.path.join(camera_folder, 'rotation'), rotation_angle_list) +np.save(os.path.join(camera_folder, 
'elevation'), elevation_angle_list) for i in range(0, args.views): cam_empty.rotation_euler[2] = math.radians(rotation_angle_list[i]) cam_empty.rotation_euler[0] = math.radians(elevation_angle_list[i]) print("Rotation {}, {}".format((stepsize * i), math.radians(stepsize * i))) - render_file_path = os.path.join(img_follder, '%03d.png' % (i)) + render_file_path = os.path.join(img_folder, '%03d.png' % (i)) scene.render.filepath = render_file_path bpy.ops.render.render(write_still=True) - # might not need it, but just in case cam is not updated correctly - bpy.context.view_layer.update() - - rt = get_3x4_RT_matrix_from_blender(cam) - pos, rt, scale = cam.matrix_world.decompose() - - rt = rt.to_matrix() - matrix = [] - for ii in range(3): - a = [] - for jj in range(3): - a.append(rt[ii][jj]) - a.append(pos[ii]) - matrix.append(a) - matrix.append([0,0,0,1]) - print(matrix) - - to_add = {\ - "file_path":f'{str(i).zfill(3)}.png', - "transform_matrix":matrix - } - frames.append(to_add) - -to_export['frames'] = frames -with open(f'{img_follder}/transforms.json', 'w') as f: - json.dump(to_export, f,indent=4)