-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathplotting.py
More file actions
2040 lines (1610 loc) · 76.2 KB
/
plotting.py
File metadata and controls
2040 lines (1610 loc) · 76.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
FocalPlotter - fSpy-like camera matching tool
Vanishing point estimation and camera solving
"""
import bpy
import gpu
import math
from gpu_extras.batch import batch_for_shader
from bpy.types import Panel, Operator, PropertyGroup
from bpy.props import (
BoolProperty,
PointerProperty,
FloatVectorProperty,
StringProperty,
)
from mathutils import Vector, Matrix, Euler
from .ui import draw_checkbox
# ---------------------------------------------------------------------------
# Data Structures
# ---------------------------------------------------------------------------
class FOCALPLOTTER_PG_axis_lines(PropertyGroup):
    """Line pair data for one axis (4 points total) - stored in normalized camera coords (0-1).

    Each axis has two user-placed guide lines; their intersection defines that
    axis' vanishing point. Coordinates are normalized to the camera frame:
    (0, 0) = bottom-left, (1, 1) = top-right (see normalized_to_screen).
    """
    # First guide line: start/end in normalized camera coordinates.
    line1_start: FloatVectorProperty(size=2, default=(0.2, 0.4))
    line1_end: FloatVectorProperty(size=2, default=(0.6, 0.4))
    # Second guide line.
    line2_start: FloatVectorProperty(size=2, default=(0.2, 0.6))
    line2_end: FloatVectorProperty(size=2, default=(0.6, 0.6))
class FOCALPLOTTER_PG_plot_settings(PropertyGroup):
    """Settings for the focal plotter (stored on the scene)."""
    # True while the interactive plotting session is running.
    is_plotting: BoolProperty(default=False)
    # --- Reference image options -------------------------------------------
    use_reference_image: BoolProperty(name="Add Reference Image", default=False)
    reference_image: PointerProperty(name="Reference Image", type=bpy.types.Image)
    set_render_resolution: BoolProperty(
        name="Set Render Resolution",
        description="Set render resolution to match image size (affects entire scene)",
        default=False,
    )
    use_rule_of_thirds: BoolProperty(name="Rule of Thirds", default=False)
    # Axis selection (fSpy-style: two selectors instead of three toggles).
    # The update callbacks re-solve the camera only during an active session.
    axis_1: bpy.props.EnumProperty(
        name="First Axis",
        description="World axis for the first vanishing point (typically horizontal)",
        items=[
            ('x', "X", "X axis (red)"),
            ('y', "Y", "Y axis (green)"),
            ('z', "Z", "Z axis (blue)"),
        ],
        default='x',
        update=lambda self, ctx: update_camera_from_vanishing_points(ctx) if self.is_plotting else None,
    )
    axis_2: bpy.props.EnumProperty(
        name="Second Axis",
        description="World axis for the second vanishing point (typically horizontal)",
        items=[
            ('x', "X", "X axis (red)"),
            ('y', "Y", "Y axis (green)"),
            ('z', "Z", "Z axis (blue)"),
        ],
        default='y',
        update=lambda self, ctx: update_camera_from_vanishing_points(ctx) if self.is_plotting else None,
    )
    auto_solve: BoolProperty(name="Auto Solve Camera", default=True)
    # Quality of life settings
    freeze_guides: BoolProperty(
        name="Freeze Guides",
        description="Prevent dragging to check alignment",
        default=False,
    )
    show_horizon: BoolProperty(
        name="Show Horizon Line",
        description="Display the horizon line while plotting",
        default=True,
    )
    # Distance only moves the camera along its view axis (no re-solve).
    camera_distance: bpy.props.FloatProperty(
        name="Distance",
        description="Distance from camera to target point. Affects camera position, not rotation",
        default=10.0,
        min=0.1,
        soft_max=100.0,
        unit='LENGTH',
        update=lambda self, ctx: update_camera_position_only(ctx) if self.is_plotting else None,
    )
    # Principal point offsets only affect the focal-length estimate.
    principal_point_x: bpy.props.FloatProperty(
        name="X",
        description="Optical center X offset. Use if the camera lens wasn't centered when photo was taken",
        default=0.0,
        min=-0.5,
        max=0.5,
        subtype='FACTOR',
        update=lambda self, ctx: update_focal_length_only(ctx) if self.is_plotting else None,
    )
    principal_point_y: bpy.props.FloatProperty(
        name="Y",
        description="Optical center Y offset. Use if the camera lens wasn't centered when photo was taken",
        default=0.0,
        min=-0.5,
        max=0.5,
        subtype='FACTOR',
        update=lambda self, ctx: update_focal_length_only(ctx) if self.is_plotting else None,
    )
    target_mode: bpy.props.EnumProperty(
        name="Target Location",
        description="Where the camera points to",
        items=[
            ('WORLD_CENTER', "World Center", "Camera targets world origin (0,0,0)"),
            ('MANUAL', "Manual", "Set target location manually"),
        ],
        default='WORLD_CENTER',
        update=lambda self, ctx: update_camera_position_only(ctx) if self.is_plotting else None,
    )
    target_location: bpy.props.FloatVectorProperty(
        name="Target",
        description="Manual target location - camera will be positioned at 'Distance' away from this point",
        default=(0.0, 0.0, 0.0),
        subtype='TRANSLATION',
        update=lambda self, ctx: update_camera_position_only(ctx) if self.is_plotting else None,
    )
    keep_bg_image: bpy.props.BoolProperty(
        name="Keep Background Image",
        description="Keep the background image on camera after stopping plotting",
        default=True,
    )
    # Original settings storage (restored when plotting stops).
    original_passepartout: FloatVectorProperty(size=2, default=(0.0, 0.5))
    original_show_name: BoolProperty(default=False)
    original_show_composition_thirds: BoolProperty(default=False)
    original_show_bg_images: BoolProperty(default=False)
    # Name of the camera whose display settings were modified.
    modified_camera: StringProperty(default="")
# ---------------------------------------------------------------------------
# Utility Functions
# ---------------------------------------------------------------------------
def is_in_camera_view(context):
    """Return True when the active space is a 3D viewport looking through the camera."""
    space = context.space_data
    if space is not None and space.type == 'VIEW_3D':
        return space.region_3d.view_perspective == 'CAMERA'
    return False
def get_active_camera(context):
    """Return the scene's active camera object (may be None)."""
    scene = context.scene
    return scene.camera
def get_closest_point_distance(context, obj, camera):
    """
    Calculate the distance from camera to the closest point on an object.
    Works with meshes and curves (including bevelled curves).

    Args:
        context: Blender context (supplies the evaluated depsgraph).
        obj: Object to measure against; evaluated with modifiers applied.
        camera: Camera object whose location is the measuring origin.

    Returns:
        (distance, point_world) or (None, None) if not calculable.
    """
    if obj is None or camera is None:
        return None, None
    cam_loc = camera.location
    # Get evaluated object (applies modifiers, curve to mesh conversion, etc.)
    depsgraph = context.evaluated_depsgraph_get()
    try:
        # Get the evaluated version of the object
        obj_eval = obj.evaluated_get(depsgraph)
        # Try to get mesh data from the evaluated object.
        # This works for meshes and curves (curves get converted to mesh).
        try:
            mesh = obj_eval.to_mesh()
        except RuntimeError:
            # Object can't be converted to mesh (e.g., empty, camera, light)
            return None, None
        if mesh is None or len(mesh.vertices) == 0:
            return None, None
        # Get the world matrix for transforming vertices.
        # NOTE(review): uses obj.matrix_world rather than obj_eval.matrix_world;
        # presumably identical for the evaluated copy - confirm for driven/
        # constrained transforms.
        world_matrix = obj.matrix_world
        min_dist = float('inf')
        closest_point = None
        # Brute-force scan of every vertex (O(n) per call).
        for vert in mesh.vertices:
            # Transform vertex to world space
            world_pos = world_matrix @ vert.co
            dist = (world_pos - cam_loc).length
            if dist < min_dist:
                min_dist = dist
                closest_point = world_pos.copy()
        # Clean up the temporary mesh created by to_mesh()
        obj_eval.to_mesh_clear()
        if min_dist == float('inf'):
            return None, None
        return min_dist, closest_point
    except Exception:
        # Best-effort helper: any unexpected RNA/depsgraph failure means "unknown".
        return None, None
def format_distance_with_units(context, distance):
    """
    Format a distance value using Blender's current unit settings.

    The raw value is multiplied by the scene's unit scale, then rendered with
    three decimals and the unit suffix that matches the configured (or, for
    'ADAPTIVE', the magnitude-appropriate) length unit.

    Returns a formatted string with appropriate units.
    """
    units = context.scene.unit_settings
    if units.system == 'NONE':
        return f"{distance:.3f}"
    # Apply the scene's unit scale first; all branches work from this value.
    meters = distance * units.scale_length
    unit = units.length_unit
    if units.system == 'METRIC':
        if unit == 'ADAPTIVE':
            # Pick the largest unit that keeps the number >= 1 (mm as floor).
            if meters >= 1000:
                return f"{meters / 1000:.3f} km"
            if meters >= 1:
                return f"{meters:.3f} m"
            if meters >= 0.01:
                return f"{meters * 100:.3f} cm"
            return f"{meters * 1000:.3f} mm"
        if unit == 'KILOMETERS':
            return f"{meters / 1000:.3f} km"
        if unit == 'CENTIMETERS':
            return f"{meters * 100:.3f} cm"
        if unit == 'MILLIMETERS':
            return f"{meters * 1000:.3f} mm"
        if unit == 'MICROMETERS':
            return f"{meters * 1000000:.3f} um"
        # METERS and any unrecognized metric unit fall back to meters.
        return f"{meters:.3f} m"
    if units.system == 'IMPERIAL':
        # Convert meters to feet (1 meter = 3.28084 feet)
        feet = meters * 3.28084
        if unit == 'ADAPTIVE':
            if feet >= 5280:  # Miles threshold
                return f"{feet / 5280:.3f} mi"
            if feet >= 1:
                return f"{feet:.3f} ft"
            return f"{feet * 12:.3f} in"
        if unit == 'MILES':
            return f"{feet / 5280:.3f} mi"
        if unit == 'INCHES':
            return f"{feet * 12:.3f} in"
        if unit == 'THOU':
            return f"{feet * 12000:.3f} thou"
        # FEET and any unrecognized imperial unit fall back to feet.
        return f"{feet:.3f} ft"
    # Unknown unit system - emit the unscaled value.
    return f"{distance:.3f}"
def get_plot_settings(context):
    """Shortcut for the scene-level FocalPlotter settings property group."""
    scene = context.scene
    return scene.focalplotter_plot
def get_axis_data(context, axis):
    """Return the per-axis line-pair property group for axis 'x', 'y' or 'z'."""
    attr_name = 'focalplotter_axis_' + axis
    return getattr(context.scene, attr_name)
def get_camera_frame_bounds(context):
    """
    Get the camera frame bounds in screen coordinates using Blender's native API.

    Projects the four corners of the active camera's view frame into region
    (pixel) space and returns their axis-aligned bounding box.

    Returns:
        (min_x, min_y, width, height) in region pixels, or None when there is
        no region/camera or a corner fails to project.
    """
    from bpy_extras.view3d_utils import location_3d_to_region_2d
    region = context.region
    rv3d = context.space_data.region_3d if context.space_data else None
    camera = get_active_camera(context)
    if region is None or rv3d is None or camera is None:
        return None
    # Get camera view frame corners (in camera local space)
    frame = camera.data.view_frame(scene=context.scene)
    # Transform corners to world space
    cam_matrix = camera.matrix_world
    corners_world = [cam_matrix @ corner for corner in frame]
    # Project to screen coordinates
    corners_screen = []
    for corner in corners_world:
        screen_co = location_3d_to_region_2d(region, rv3d, corner)
        if screen_co is None:
            # Corner did not project (e.g. behind the view) - bounds undefined.
            return None
        corners_screen.append((screen_co.x, screen_co.y))
    # Find bounding box from corners (handles any order)
    xs = [c[0] for c in corners_screen]
    ys = [c[1] for c in corners_screen]
    min_x, max_x = min(xs), max(xs)
    min_y, max_y = min(ys), max(ys)
    return (min_x, min_y, max_x - min_x, max_y - min_y)
def normalized_to_screen(context, norm_pos):
    """
    Convert normalized camera coords (0-1) to screen pixel coords.
    (0,0) = bottom-left of camera frame, (1,1) = top-right.
    Returns None when the camera frame bounds are unavailable.
    """
    frame = get_camera_frame_bounds(context)
    if frame is None:
        return None
    left, bottom, width, height = frame
    return (left + norm_pos[0] * width, bottom + norm_pos[1] * height)
def screen_to_normalized(context, screen_pos):
    """
    Convert screen pixel coords to normalized camera coords (0-1).
    Returns None when the camera frame is unavailable or degenerate (<1px).
    """
    frame = get_camera_frame_bounds(context)
    if frame is None:
        return None
    left, bottom, width, height = frame
    if width < 1 or height < 1:
        return None
    return ((screen_pos[0] - left) / width, (screen_pos[1] - bottom) / height)
def is_point_on_screen(context, screen_pos, margin=0):
    """Check if a screen position is visible in the region, padded by `margin` pixels."""
    region = context.region
    if region is None:
        return False
    x, y = screen_pos
    inside_x = -margin <= x <= region.width + margin
    inside_y = -margin <= y <= region.height + margin
    return inside_x and inside_y
def get_enabled_axes(settings):
    """Get the two enabled axes as a list [axis1, axis2]."""
    first = settings.axis_1
    second = settings.axis_2
    return [first, second]
def is_axis_enabled(settings, axis):
    """Check if an axis is one of the two selected axes."""
    return axis == settings.axis_1 or axis == settings.axis_2
def get_third_axis(settings):
    """Get the axis that is NOT selected (None when both selectors share one axis)."""
    selected = {settings.axis_1, settings.axis_2}
    if len(selected) != 2:
        return None
    remaining = {'x', 'y', 'z'} - selected
    return remaining.pop()
def line_intersection_2d(p1, p2, p3, p4):
    """
    Find intersection of two infinite lines defined by point pairs.
    Returns intersection point or None if lines are (near-)parallel.
    """
    ax, ay = p1
    bx, by = p2
    cx, cy = p3
    dx, dy = p4
    # Denominator of the parametric solution; ~0 means parallel lines.
    denom = (ax - bx) * (cy - dy) - (ay - by) * (cx - dx)
    if abs(denom) < 0.0001:
        return None
    t = ((ax - cx) * (cy - dy) - (ay - cy) * (cx - dx)) / denom
    return (ax + t * (bx - ax), ay + t * (by - ay))
def is_point_in_forward_direction(start, end, point):
    """Check if point lies beyond `end`, along the direction start -> end."""
    # Segment direction and the offset of the point past the segment's end.
    dir_x, dir_y = end[0] - start[0], end[1] - start[1]
    off_x, off_y = point[0] - end[0], point[1] - end[1]
    # Positive dot product means the point continues past end in that direction.
    return dir_x * off_x + dir_y * off_y > 0
def point_to_segment_distance(point, seg_start, seg_end):
    """
    Calculate distance from point to line segment.
    Returns (distance, t) where t is parameter along segment (0=start, 1=end).
    Distance is inf when the point projects outside the segment (t < 0 or t > 1).
    """
    px, py = point
    sx, sy = seg_start
    ex, ey = seg_end
    vx, vy = ex - sx, ey - sy
    length_sq = vx * vx + vy * vy
    if length_sq < 0.0001:
        # Degenerate segment: both endpoints coincide; report midpoint t.
        return math.hypot(px - sx, py - sy), 0.5
    # Parameter of the orthogonal projection of the point onto the line.
    t = ((px - sx) * vx + (py - sy) * vy) / length_sq
    if not (0 <= t <= 1):
        # Projection falls outside the segment.
        return float('inf'), t
    return math.hypot(px - (sx + t * vx), py - (sy + t * vy)), t
def get_vanishing_point_any_direction(context, axis):
    """Get vanishing point regardless of direction (for camera solving) - in screen coords."""
    points = get_screen_coords_for_axis(context, axis)
    if any(p is None for p in points):
        return None
    a, b, c, d = points
    return line_intersection_2d(a, b, c, d)
# ---------------------------------------------------------------------------
# Camera Solving
# ---------------------------------------------------------------------------
def solve_camera_from_vanishing_points(context, vp1, vp2, axis1, axis2):
    """
    Solve camera rotation and focal length from two vanishing points.
    Uses the standard pinhole camera model where perpendicular world axes
    have vanishing points satisfying: VP1·VP2 = -f² (relative to principal point)

    Args:
        context: Blender context (camera frame bounds, render resolution).
        vp1, vp2: Vanishing points in screen (region pixel) coordinates.
        axis1, axis2: World axis names ('x'/'y'/'z') that vp1/vp2 belong to.

    Returns:
        (focal_length_mm, euler_rotation). Either element may be None: both
        when frame bounds or camera are missing, rotation alone when the two
        VP directions are degenerate (nearly parallel).
    """
    # Get camera frame bounds to find the correct center and size
    bounds = get_camera_frame_bounds(context)
    if bounds is None:
        return None, None
    min_x, min_y, frame_w, frame_h = bounds
    if frame_w < 1 or frame_h < 1:
        return None, None
    # Camera frame center (in screen pixels), with principal point offset
    settings = get_plot_settings(context)
    pp_offset_x = settings.principal_point_x * frame_w
    pp_offset_y = settings.principal_point_y * frame_h
    cx = min_x + frame_w / 2 + pp_offset_x
    cy = min_y + frame_h / 2 + pp_offset_y
    # Vanishing points relative to (offset) optical center
    # Screen coords: X right, Y up (Blender convention)
    u1, v1 = vp1[0] - cx, vp1[1] - cy
    u2, v2 = vp2[0] - cx, vp2[1] - cy
    # Focal length from orthogonality constraint: d1·d2 = 0
    # For directions (u1,v1,f) and (u2,v2,f): u1*u2 + v1*v2 + f² = 0
    dot_uv = u1 * u2 + v1 * v2
    if dot_uv >= 0:
        f_pixels = frame_w * 0.8  # Default if constraint not satisfied
    else:
        f_pixels = math.sqrt(-dot_uv)
        # Clamp to a sane range so noisy guides can't produce extreme lenses.
        f_pixels = max(frame_w * 0.25, min(f_pixels, frame_w * 2.5))
    camera = get_active_camera(context)
    if camera is None:
        return None, None
    # Convert to mm: focal(px in frame) -> px at render res -> mm on sensor.
    cam_data = camera.data
    sensor_width = cam_data.sensor_width
    render_w = context.scene.render.resolution_x
    scale = render_w / frame_w
    focal_length_mm = (f_pixels * scale / render_w) * sensor_width
    focal_length_mm = max(15, min(focal_length_mm, 150))  # clamp to 15-150mm
    # Build direction vectors in a camera-centric coordinate system:
    # - cam_x: points RIGHT in the image
    # - cam_y: points UP in the image
    # - cam_z: points TOWARD VIEWER (out of screen) - this matches Blender's camera +Z
    #
    # A vanishing point at (u, v) on screen corresponds to a world direction
    # that projects to that point. The ray direction FROM camera is (u, v, -f)
    # because the image plane is in front of the camera.
    # But we want the direction TO the vanishing point which is (u, v, f)
    # interpreted as pointing away from camera into the scene.
    #
    # In our system where +Z is toward viewer:
    # direction into scene = (u, v, -f) normalized
    d1 = Vector((u1, v1, -f_pixels)).normalized()
    d2 = Vector((u2, v2, -f_pixels)).normalized()
    # d1 and d2 are the directions of world axes axis1 and axis2
    # as seen from the camera (pointing into the scene)
    # However, we don't know if they point in the + or - direction of each axis
    # Apply sign conventions based on typical camera orientations:
    # - Z axis (vertical) should point "up" in camera view (positive camera Y component)
    # - For horizontal axes (X, Y), positive directions typically have positive camera X or Y
    # Fix Z axis orientation: if Z is selected, it should point up in camera space
    if axis1 == 'z' and d1.y < 0:
        d1 = -d1
    if axis2 == 'z' and d2.y < 0:
        d2 = -d2
    # For horizontal axes, ensure consistent orientation
    # X axis should generally point right (positive camera X)
    # Y axis direction depends on the scene
    if axis1 == 'x' and d1.x < 0:
        d1 = -d1
    if axis2 == 'x' and d2.x < 0:
        d2 = -d2
    # Figure out third world axis
    all_axes = ['x', 'y', 'z']
    third_axis = [a for a in all_axes if a != axis1 and a != axis2][0]
    # World axis vectors
    world_vecs = {
        'x': Vector((1, 0, 0)),
        'y': Vector((0, 1, 0)),
        'z': Vector((0, 0, 1)),
    }
    # Gram-Schmidt orthogonalization preserving d1 and keeping d2 close
    e1 = d1.normalized()
    e2_temp = d2 - d2.dot(e1) * e1
    if e2_temp.length < 0.001:
        # Degenerate: the two VP directions are (nearly) parallel - no rotation.
        return focal_length_mm, None
    e2 = e2_temp.normalized()
    # Third axis via cross product with correct handedness
    # For right-handed coordinates: X = Y × Z, Y = Z × X, Z = X × Y
    e3 = e1.cross(e2)
    # Ensure correct handedness for the third axis
    expected_cross = world_vecs[axis1].cross(world_vecs[axis2])
    if expected_cross.dot(world_vecs[third_axis]) < 0:
        e3 = -e3
    # Map each world axis to its camera-space direction
    dirs = {axis1: e1, axis2: e2, third_axis: e3}
    # Build rotation matrix from camera to world
    # If world axis W appears in direction D in camera space,
    # then the camera's axes in world space are given by the inverse mapping
    #
    # Camera basis vectors in world coords:
    # cam_right (X) should map world directions to camera X
    # cam_up (Y) should map world directions to camera Y
    # cam_forward (-Z for Blender) points into scene
    #
    # The matrix with dirs as rows transforms camera coords to world coords
    cam_x_world = Vector((dirs['x'][0], dirs['y'][0], dirs['z'][0]))
    cam_y_world = Vector((dirs['x'][1], dirs['y'][1], dirs['z'][1]))
    cam_z_world = Vector((dirs['x'][2], dirs['y'][2], dirs['z'][2]))
    # This gives us where camera's X, Y, Z axes point in world space
    # For Blender camera: X=right, Y=up, -Z=forward (into scene)
    # Our cam_z_world points toward viewer, so -cam_z_world points into scene
    # Check if camera is upside down: camera's Y (up in image) should have
    # positive Z component in world space (i.e., "up" in image = "up" in world)
    # If not, we need to flip the camera 180 degrees around its view axis
    if cam_y_world.z < 0:
        # Camera is upside down - flip by negating X and Y axes
        cam_x_world = -cam_x_world
        cam_y_world = -cam_y_world
    # Construct rotation matrix (camera to world)
    R = Matrix((cam_x_world, cam_y_world, cam_z_world)).transposed()
    try:
        euler = R.to_euler('XYZ')
    except Exception:
        # Fall back to identity orientation if conversion fails.
        euler = Euler((0, 0, 0))
    return focal_length_mm, euler
def update_camera_position_only(context):
    """Update camera position based on current rotation, distance and target (no re-solve)."""
    settings = get_plot_settings(context)
    camera = get_active_camera(context)
    if camera is None:
        return
    # Target is the world origin unless the user chose a manual target point.
    target = Vector(settings.target_location) if settings.target_mode == 'MANUAL' else Vector((0, 0, 0))
    distance = settings.camera_distance
    # Use existing camera rotation
    rot_matrix = camera.rotation_euler.to_matrix()
    # Blender cameras look down their local -Z axis.
    forward = rot_matrix @ Vector((0, 0, -1))
    # Position camera at distance from target (backed away along view direction)
    camera.location = target - forward * distance
def update_focal_length_only(context):
    """
    Update only focal length based on principal point, without changing rotation.

    Mirrors the focal-length half of solve_camera_from_vanishing_points so
    that tweaking the principal point does not disturb a solved rotation.
    """
    settings = get_plot_settings(context)
    camera = get_active_camera(context)
    if camera is None:
        return
    # Get selected axes; two distinct axes are needed for two vanishing points.
    axis1, axis2 = settings.axis_1, settings.axis_2
    if axis1 == axis2:
        return
    vp1 = get_vanishing_point_any_direction(context, axis1)
    vp2 = get_vanishing_point_any_direction(context, axis2)
    if vp1 is None or vp2 is None:
        return
    # Get camera frame bounds
    bounds = get_camera_frame_bounds(context)
    if bounds is None:
        return
    min_x, min_y, frame_w, frame_h = bounds
    if frame_w < 1 or frame_h < 1:
        return
    # Camera frame center with principal point offset
    pp_offset_x = settings.principal_point_x * frame_w
    pp_offset_y = settings.principal_point_y * frame_h
    cx = min_x + frame_w / 2 + pp_offset_x
    cy = min_y + frame_h / 2 + pp_offset_y
    # Vanishing points relative to (offset) optical center
    u1, v1 = vp1[0] - cx, vp1[1] - cy
    u2, v2 = vp2[0] - cx, vp2[1] - cy
    # Focal length from orthogonality constraint (u1*u2 + v1*v2 + f² = 0)
    dot_uv = u1 * u2 + v1 * v2
    if dot_uv >= 0:
        # Constraint unsatisfiable (VPs on the same side) - use a default.
        f_pixels = frame_w * 0.8
    else:
        f_pixels = math.sqrt(-dot_uv)
        # Clamp to a sane range relative to the frame width.
        f_pixels = max(frame_w * 0.25, min(f_pixels, frame_w * 2.5))
    # Convert to mm via sensor width, clamped to 15-150mm.
    cam_data = camera.data
    sensor_width = cam_data.sensor_width
    render_w = context.scene.render.resolution_x
    scale = render_w / frame_w
    focal_length_mm = (f_pixels * scale / render_w) * sensor_width
    focal_length_mm = max(15, min(focal_length_mm, 150))
    # Only update focal length, keep rotation unchanged
    camera.data.lens = focal_length_mm
def update_camera_from_vanishing_points(context, force=False):
    """
    Update camera based on current vanishing points.

    Re-solves focal length and rotation from the two selected axes' vanishing
    points, then re-positions the camera at `camera_distance` from the target.

    Args:
        context: Blender context (scene camera + plot settings).
        force: Solve even when the "Auto Solve Camera" option is disabled.
    """
    settings = get_plot_settings(context)
    camera = get_active_camera(context)
    if camera is None:
        return
    # Skip if auto_solve is disabled, unless forced
    if not settings.auto_solve and not force:
        return
    # Get the two selected axes; skip if the same axis is selected for both (invalid)
    axis1, axis2 = settings.axis_1, settings.axis_2
    if axis1 == axis2:
        return
    # Use any direction for camera solving (VP can be behind lines)
    vp1 = get_vanishing_point_any_direction(context, axis1)
    vp2 = get_vanishing_point_any_direction(context, axis2)
    if vp1 is None or vp2 is None:
        return
    focal_length, rotation = solve_camera_from_vanishing_points(
        context, vp1, vp2, axis1, axis2
    )
    if focal_length is not None:
        camera.data.lens = focal_length
    if rotation is not None:
        camera.rotation_euler = rotation
    else:
        # BUGFIX: the solver returns rotation=None for degenerate (near-
        # parallel) VP directions; previously rotation.to_matrix() below
        # raised AttributeError. Fall back to the camera's current rotation
        # so the re-positioning still works.
        rotation = camera.rotation_euler
    # Apply camera distance and target location
    target = Vector(settings.target_location) if settings.target_mode == 'MANUAL' else Vector((0, 0, 0))
    distance = settings.camera_distance
    # Camera forward direction (looks down -Z local)
    rot_matrix = rotation.to_matrix()
    forward = rot_matrix @ Vector((0, 0, -1))
    # Position camera at distance from target
    camera.location = target - forward * distance
# ---------------------------------------------------------------------------
# Drawing
# ---------------------------------------------------------------------------
# Handle returned by SpaceView3D.draw_handler_add (None while unregistered).
_draw_handler = None
# RGBA overlay colors per world axis.
AXIS_COLORS = {
    'x': (1.0, 0.2, 0.3, 0.9),
    'y': (0.5, 0.8, 0.2, 0.9),
    'z': (0.3, 0.5, 1.0, 0.9),
}
# Radius (pixels) of the draggable endpoint dots.
DOT_RADIUS = 8
# Global drag state
_drag_state = {
    'active': False,
    'drag_type': 'point',  # 'point' or 'line'
    'axis': None,          # axis key ('x'/'y'/'z') of the dragged element
    'line_index': 0,       # 1 or 2: which guide line of the axis
    'point_index': 0,      # 0 = line start, 1 = line end
    'init_pos': None,
    'init_mouse': None,
    'last_mouse': None,
    'current_pos': None,
    # For line dragging - store both endpoints
    'init_pos_start': None,
    'init_pos_end': None,
    'current_pos_start': None,
    'current_pos_end': None,
    'precise_mode': False,  # when True the dragged dot is hidden while drawing
}
def draw_circle_2d(center, radius, color, shader, filled=True):
    """Draw a 2D circle at `center` - filled triangle-fan or outline ring."""
    segments = 20
    cx, cy = center[0], center[1]
    # Circumference points; the final point duplicates the first to close the ring.
    ring = [
        (cx + radius * math.cos(2 * math.pi * i / segments),
         cy + radius * math.sin(2 * math.pi * i / segments))
        for i in range(segments + 1)
    ]
    if filled:
        # Triangle fan around the center vertex (index 0).
        verts = [center] + ring
        indices = [(0, i, i + 1 if i < segments else 1) for i in range(1, segments + 1)]
        batch = batch_for_shader(shader, 'TRIS', {"pos": verts}, indices=indices)
    else:
        batch = batch_for_shader(shader, 'LINE_STRIP', {"pos": ring})
    shader.bind()
    shader.uniform_float("color", color)
    batch.draw(shader)
def get_screen_coords_for_axis(context, axis):
    """Get screen coordinates for an axis, converting from normalized storage."""
    data = get_axis_data(context, axis)
    stored = (data.line1_start, data.line1_end, data.line2_start, data.line2_end)
    return tuple(normalized_to_screen(context, tuple(p)) for p in stored)
def draw_focalplotter():
    """Main draw callback.

    Runs as a POST_PIXEL handler: draws the two axis guide line pairs, their
    rays toward each vanishing point, the draggable endpoint dots, and
    (optionally) the horizon line. Only active while plotting in camera view.
    """
    context = bpy.context
    if not is_in_camera_view(context):
        return
    settings = get_plot_settings(context)
    if not settings.is_plotting:
        return
    region = context.region
    if region is None:
        return
    shader = gpu.shader.from_builtin('UNIFORM_COLOR')
    gpu.state.blend_set('ALPHA')
    gpu.state.line_width_set(2.0)
    # Draw the two selected axes
    axes_to_draw = [settings.axis_1, settings.axis_2]
    for axis in axes_to_draw:
        color = AXIS_COLORS[axis]
        # Convert normalized coords to screen coords
        p1, p2, p3, p4 = get_screen_coords_for_axis(context, axis)
        if None in (p1, p2, p3, p4):
            continue
        # Get vanishing point (intersection of the two lines) in screen coords
        vp = line_intersection_2d(p1, p2, p3, p4)
        # Draw line 1: always draw segment between dots
        batch = batch_for_shader(shader, 'LINES', {"pos": [p1, p2]})
        shader.bind()
        shader.uniform_float("color", color)
        batch.draw(shader)
        # Draw line 2: always draw segment between dots
        batch = batch_for_shader(shader, 'LINES', {"pos": [p3, p4]})
        shader.bind()
        shader.uniform_float("color", color)
        batch.draw(shader)
        # If vanishing point exists, draw rays from segment endpoints to VP
        if vp:
            ray_color = (*color[:3], 0.4)  # Dimmer for the ray extension
            # For line 1: check which endpoint the VP is beyond
            # VP beyond p2 (in direction p1->p2) OR VP beyond p1 (in direction p2->p1)
            vp_beyond_p2 = is_point_in_forward_direction(p1, p2, vp)
            vp_beyond_p1 = is_point_in_forward_direction(p2, p1, vp)
            if vp_beyond_p2:
                batch = batch_for_shader(shader, 'LINES', {"pos": [p2, vp]})
                shader.bind()
                shader.uniform_float("color", ray_color)
                batch.draw(shader)
            elif vp_beyond_p1:
                batch = batch_for_shader(shader, 'LINES', {"pos": [p1, vp]})
                shader.bind()
                shader.uniform_float("color", ray_color)
                batch.draw(shader)
            # For line 2: check which endpoint the VP is beyond
            vp_beyond_p4 = is_point_in_forward_direction(p3, p4, vp)
            vp_beyond_p3 = is_point_in_forward_direction(p4, p3, vp)
            if vp_beyond_p4:
                batch = batch_for_shader(shader, 'LINES', {"pos": [p4, vp]})
                shader.bind()
                shader.uniform_float("color", ray_color)
                batch.draw(shader)
            elif vp_beyond_p3:
                batch = batch_for_shader(shader, 'LINES', {"pos": [p3, vp]})
                shader.bind()
                shader.uniform_float("color", ray_color)
                batch.draw(shader)
            # Draw vanishing point marker if VP is beyond both segments (not inside them)
            line1_has_ray = vp_beyond_p2 or vp_beyond_p1
            line2_has_ray = vp_beyond_p4 or vp_beyond_p3
            if line1_has_ray and line2_has_ray:
                draw_circle_2d(vp, DOT_RADIUS + 4, (*color[:3], 0.6), shader, filled=False)
        # Draw dots (skip dragged dot in precise mode, skip off-screen dots)
        for point in [(p1, 1, 0), (p2, 1, 1), (p3, 2, 0), (p4, 2, 1)]:
            pos, li, pi = point
            # Skip if this is the dragged dot in precise mode
            if (_drag_state['precise_mode'] and _drag_state['active'] and
                    _drag_state['axis'] == axis and
                    _drag_state['line_index'] == li and
                    _drag_state['point_index'] == pi):
                continue
            # Skip if dot is off-screen (rays still render, dot position preserved)
            if not is_point_on_screen(context, pos, margin=DOT_RADIUS):
                continue
            draw_circle_2d(pos, DOT_RADIUS, color, shader)
    # Draw horizon line (only when both X and Y are selected - the true horizon)
    # Horizon is the vanishing line of the ground plane (X-Y), doesn't apply to Z combinations
    has_horizon = 'x' in (settings.axis_1, settings.axis_2) and 'y' in (settings.axis_1, settings.axis_2)
    if settings.show_horizon and has_horizon:
        x_p1, x_p2, x_p3, x_p4 = get_screen_coords_for_axis(context, 'x')
        y_p1, y_p2, y_p3, y_p4 = get_screen_coords_for_axis(context, 'y')
        if None not in (x_p1, x_p2, x_p3, x_p4, y_p1, y_p2, y_p3, y_p4):
            vp_x = line_intersection_2d(x_p1, x_p2, x_p3, x_p4)
            vp_y = line_intersection_2d(y_p1, y_p2, y_p3, y_p4)
            if vp_x and vp_y:
                # Draw horizon line through both vanishing points
                horizon_color = (1.0, 1.0, 0.0, 0.5)  # Yellow
                gpu.state.line_width_set(1.0)
                # Extend the line across the screen
                dx = vp_y[0] - vp_x[0]
                dy = vp_y[1] - vp_x[1]
                length = math.sqrt(dx*dx + dy*dy)
                if length > 0.001:
                    # Normalize and extend
                    dx, dy = dx/length, dy/length
                    extend = 5000  # Extend far beyond screen
                    p_start = (vp_x[0] - dx * extend, vp_x[1] - dy * extend)
                    p_end = (vp_y[0] + dx * extend, vp_y[1] + dy * extend)
                    batch = batch_for_shader(shader, 'LINES', {"pos": [p_start, p_end]})
                    shader.bind()
                    shader.uniform_float("color", horizon_color)
                    batch.draw(shader)
    # Restore default GPU state for subsequent draw handlers.
    gpu.state.blend_set('NONE')
    gpu.state.line_width_set(1.0)
def register_draw_handler():
    """Install the POST_PIXEL draw callback once (no-op if already installed)."""
    global _draw_handler
    if _draw_handler is not None:
        return
    _draw_handler = bpy.types.SpaceView3D.draw_handler_add(
        draw_focalplotter, (), 'WINDOW', 'POST_PIXEL'
    )
def unregister_draw_handler():
    """Remove the draw callback if it is installed."""
    global _draw_handler
    if _draw_handler is None:
        return
    bpy.types.SpaceView3D.draw_handler_remove(_draw_handler, 'WINDOW')
    _draw_handler = None
# ---------------------------------------------------------------------------
# Interactive Modal Operator
# ---------------------------------------------------------------------------
class FOCALPLOTTER_OT_interactive(Operator):
"""Interactive mode - click and drag dots directly"""
bl_idname = "focalplotter.interactive"
bl_label = "FocalPlotter Interactive"
bl_options = {'REGISTER'}
def find_nearest_point(self, context, mouse_pos):
"""Find nearest dot to mouse position. Returns (axis, line_idx, point_idx, screen_pos) or None."""
settings = get_plot_settings(context)