Rendering meshes with pyrender
Contents
The render_meshes function
Calling the function

The render_meshes function
import numpy as np
import pyrender
import torch
import trimesh
from PIL import Image

# Axis flip between the OpenCV (+z forward) and OpenGL (-z forward) camera conventions; used by render_meshes below.
OPENCV_TO_OPENGL_CAMERA_CONVENTION = np.diag([1.0, -1.0, -1.0, 1.0])

# `color` is expected to be defined elsewhere in the script (a list of RGB tuples).

def overlay_human_meshes(humans, K, model, img_pil, unique_color=False):
    # Color of humans seen in the image.
    _color = [color[0] for _ in range(len(humans))] if unique_color else color

    # Get focal and princpt for rendering.
    focal = np.asarray([K[0,0,0].cpu().numpy(), K[0,1,1].cpu().numpy()])
    princpt = np.asarray([K[0,0,-1].cpu().numpy(), K[0,1,-1].cpu().numpy()])

    # Get the vertices produced by the model.
    verts_list = [humans[j]['v3d'].cpu().numpy() for j in range(len(humans))]
    faces_list = [model.smpl_layer['neutral_10'].bm_x.faces for j in range(len(humans))]

    # Render the meshes onto the image.
    pred_rend_array = render_meshes(np.asarray(img_pil),
                                    verts_list,
                                    faces_list,
                                    {'focal': focal, 'princpt': princpt},
                                    alpha=1.0,
                                    color=_color)

    return pred_rend_array, _color
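The indexing of K above assumes a batched 3×3 intrinsics tensor of shape [1, 3, 3]: K[0,0,0] and K[0,1,1] are the focal lengths, and K[0,0,-1], K[0,1,-1] the principal point. A minimal sketch with made-up values to show how focal and princpt are extracted:

import numpy as np
import torch

# Hypothetical intrinsics: fx = fy = 1000, principal point at (512, 512).
K = torch.tensor([[[1000.,    0., 512.],
                   [   0., 1000., 512.],
                   [   0.,    0.,   1.]]])
focal   = np.asarray([K[0, 0, 0].cpu().numpy(), K[0, 1, 1].cpu().numpy()])    # [fx, fy]
princpt = np.asarray([K[0, 0, -1].cpu().numpy(), K[0, 1, -1].cpu().numpy()])  # [cx, cy]
print(focal, princpt)  # [1000. 1000.] [512. 512.]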
def render_meshes(img, l_mesh, l_face, cam_param, color=None, alpha=1.0,
                  show_camera=False,
                  intensity=3.0,
                  metallicFactor=0., roughnessFactor=0.5, smooth=True,
                  ):
    """
    Render multiple meshes and project them onto the input image.
    Args:
        - img: np.array [h,w,3]
        - l_mesh: list of np.array [v,3]
        - l_face: list of np.array [f,3]
        - cam_param: dict with the camera intrinsics ('focal', 'princpt') and, optionally, extrinsics ('R', 't')
    Return:
        - img: np.array [h,w,3]
    """
    # Scene
    scene = pyrender.Scene(ambient_light=(0.3, 0.3, 0.3))

    # Meshes: random color if none is given, otherwise per-mesh (list) or shared (tuple).
    for i, mesh in enumerate(l_mesh):
        if color is None:
            _color = (np.random.choice(range(1, 225)) / 255,
                      np.random.choice(range(1, 225)) / 255,
                      np.random.choice(range(1, 225)) / 255)
        else:
            if isinstance(color, list):
                _color = color[i]
            elif isinstance(color, tuple):
                _color = color
            else:
                raise NotImplementedError

        mesh = trimesh.Trimesh(mesh, l_face[i])
        material = pyrender.MetallicRoughnessMaterial(
            metallicFactor=metallicFactor,
            roughnessFactor=roughnessFactor,
            alphaMode='OPAQUE',
            baseColorFactor=(_color[0], _color[1], _color[2], 1.0))
        mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=smooth)
        scene.add(mesh, f"mesh_{i}")
    # Optionally add a camera gizmo (box + cone) and a coordinate frame near the origin.
    # Geometry built with pyvista, see https://docs.pyvista.org/version/stable/api/utilities/_autosummary/pyvista.Pyramid.html
    if show_camera:
        import pyvista

        def get_faces(x):
            return x.faces.astype(np.uint32).reshape((x.n_faces, 4))[:, 1:]

        # Camera = box + cone.
        material_cam = pyrender.MetallicRoughnessMaterial(metallicFactor=metallicFactor, roughnessFactor=roughnessFactor, alphaMode='OPAQUE', baseColorFactor=(0.5, 0.5, 0.5))
        height = 0.2
        radius = 0.1
        cone = pyvista.Cone(center=(0.0, 0.0, -height/2), direction=(0.0, 0.0, -1.0), height=height, radius=radius).extract_surface().triangulate()
        verts = cone.points
        mesh = pyrender.Mesh.from_trimesh(trimesh.Trimesh(verts, get_faces(cone)), material=material_cam, smooth=smooth)
        scene.add(mesh, "cone")

        size = 0.1
        box = pyvista.Box(bounds=(-size, size,
                                  -size, size,
                                  verts[:, -1].min() - 3*size, verts[:, -1].min())).extract_surface().triangulate()
        verts = box.points
        mesh = pyrender.Mesh.from_trimesh(trimesh.Trimesh(verts, get_faces(box)), material=material_cam, smooth=smooth)
        scene.add(mesh, "box")

        # Coordinate system: one arrow per axis (x red, y green, z blue).
        # https://docs.pyvista.org/version/stable/api/utilities/_autosummary/pyvista.Arrow.html
        l_color = [(1, 0, 0, 1.0), (0, 1, 0, 1.0), (0, 0, 1, 1.0)]
        l_direction = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
        scale = 0.2
        pose3d = [2*scale, 0.0, -scale]
        for i in range(len(l_color)):
            arrow = pyvista.Arrow(direction=l_direction[i], scale=scale)
            arrow = arrow.extract_surface().triangulate()
            verts = arrow.points + np.asarray([pose3d])
            faces = arrow.faces.astype(np.uint32).reshape((arrow.n_faces, 4))[:, 1:]
            mesh = trimesh.Trimesh(verts, faces)
            material = pyrender.MetallicRoughnessMaterial(metallicFactor=metallicFactor, roughnessFactor=roughnessFactor, alphaMode='OPAQUE', baseColorFactor=l_color[i])
            mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=smooth)
            scene.add(mesh, f"arrow_{i}")
    # Camera: intrinsics from cam_param, extrinsics default to identity.
    focal, princpt = cam_param['focal'], cam_param['princpt']
    camera_pose = np.eye(4)
    if 'R' in cam_param.keys():
        camera_pose[:3, :3] = cam_param['R']
    if 't' in cam_param.keys():
        camera_pose[:3, 3] = cam_param['t']
    camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1], cx=princpt[0], cy=princpt[1])

    # Convert the OpenCV world-to-camera pose into the OpenGL camera-to-world pose expected by pyrender.
    camera_pose = OPENCV_TO_OPENGL_CAMERA_CONVENTION @ camera_pose
    camera_pose = np.linalg.inv(camera_pose)
    scene.add(camera, pose=camera_pose)

    # Renderer
    renderer = pyrender.OffscreenRenderer(viewport_width=img.shape[1], viewport_height=img.shape[0], point_size=1.0)

    # Light attached at the camera pose.
    light = pyrender.DirectionalLight(intensity=intensity)
    scene.add(light, pose=camera_pose)

    # Render color and depth; the depth map gives the foreground mask.
    rgb, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
    rgb = rgb[:, :, :3].astype(np.float32)
    fg = (depth > 0)[:, :, None].astype(np.float32)

    # Simple smoothing of the foreground mask at its boundary.
    bg_blending_radius = 1
    bg_blending_kernel = 2.0 * torch.ones((1, 1, 2 * bg_blending_radius + 1, 2 * bg_blending_radius + 1)) / (2 * bg_blending_radius + 1) ** 2
    bg_blending_bias = -torch.ones(1)
    fg = fg.reshape((fg.shape[0], fg.shape[1]))
    fg = torch.from_numpy(fg).unsqueeze(0)
    fg = torch.clamp_min(torch.nn.functional.conv2d(fg, weight=bg_blending_kernel, bias=bg_blending_bias, padding=bg_blending_radius) * fg, 0.0)
    fg = fg.permute(1, 2, 0).numpy()

    # Alpha-blend the rendering with the input image on the foreground mask.
    img = (fg * (alpha * rgb + (1.0 - alpha) * img) + (1 - fg) * img).astype(np.uint8)

    renderer.delete()

    return img.astype(np.uint8)
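render_meshes can be exercised on its own, without the human-mesh model. Below is a minimal sketch, assuming pyrender can create an offscreen GL context on your machine (e.g. PYOPENGL_PLATFORM=egl on a headless server); the sphere, background size and intrinsics are made up for illustration:

import numpy as np
import trimesh

# Blank 512x512 background and a 0.5 m sphere placed 3 m in front of the camera
# (OpenCV convention: +z points from the camera into the scene).
bg = np.zeros((512, 512, 3), dtype=np.uint8)
sphere = trimesh.creation.icosphere(subdivisions=3, radius=0.5)
verts = np.asarray(sphere.vertices) + np.array([0.0, 0.0, 3.0])
faces = np.asarray(sphere.faces)

cam_param = {'focal': np.array([500.0, 500.0]), 'princpt': np.array([256.0, 256.0])}
out = render_meshes(bg, [verts], [faces], cam_param, color=[(0.7, 0.3, 0.3)], alpha=1.0)
print(out.shape)  # (512, 512, 3)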
Calling the function
# This snippet runs inside the demo's per-image loop (i is the loop index,
# x the preprocessed input tensor, img_pil_nopad the unpadded input image).
p_x, p_y = None, None
K = get_camera_parameters(model.img_size, fov=args.fov, p_x=p_x, p_y=p_y)
print(i, K)

# Make model predictions.
start = time.time()
humans = forward_model(model, x, K,
                       det_thresh=args.det_thresh,
                       nms_kernel_size=args.nms_kernel_size)
duration = time.time() - start
l_duration.append(duration)

if len(humans) == 0:
    print('----------humans:0--------')
if len(humans) > 0:
    # Superimpose the predicted human meshes onto the input image.
    img_array = np.asarray(img_pil_nopad)
    img_pil_visu = Image.fromarray(img_array)
    pred_rend_array, _color = overlay_human_meshes(humans, K, model, img_pil_visu, unique_color=args.unique_color)
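get_camera_parameters belongs to the original demo code and is not reproduced in this post. For intuition only, a pinhole K for a square input of side img_size can be derived from a field of view roughly as below; fov_to_K is a hypothetical helper, and the handling of p_x/p_y as relative principal-point offsets is an assumption, not necessarily what the original function does:

import numpy as np
import torch

def fov_to_K(img_size, fov_deg=60.0, p_x=None, p_y=None):
    # Focal length from the field of view; principal point defaults to the image center.
    focal = img_size / (2.0 * np.tan(np.radians(fov_deg) / 2.0))
    cx = img_size / 2.0 if p_x is None else p_x * img_size
    cy = img_size / 2.0 if p_y is None else p_y * img_size
    return torch.tensor([[[focal, 0.0,   cx],
                          [0.0,   focal, cy],
                          [0.0,   0.0,  1.0]]], dtype=torch.float32)

K = fov_to_K(512, fov_deg=60)  # batched [1, 3, 3], same layout as consumed by overlay_human_meshes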