1import builtins 2import functools 3from collections import OrderedDict 4 5import numpy as np 6 7from yt.config import ytcfg 8from yt.funcs import mylog 9from yt.units.dimensions import length 10from yt.units.unit_registry import UnitRegistry 11from yt.units.yt_array import YTArray, YTQuantity 12from yt.utilities.exceptions import YTNotInsideNotebook 13from yt.visualization._commons import get_canvas, validate_image_name 14 15from .camera import Camera 16from .render_source import ( 17 BoxSource, 18 CoordinateVectorSource, 19 GridSource, 20 LineSource, 21 MeshSource, 22 OpaqueSource, 23 PointSource, 24 RenderSource, 25 VolumeSource, 26) 27from .zbuffer_array import ZBuffer 28 29 30class Scene: 31 32 """A virtual landscape for a volume rendering. 33 34 The Scene class is meant to be the primary container for the 35 new volume rendering framework. A single scene may contain 36 several Camera and RenderSource instances, and is the primary 37 driver behind creating a volume rendering. 38 39 This sets up the basics needed to add sources and cameras. 40 This does very little setup, and requires additional input 41 to do anything useful. 42 43 Examples 44 -------- 45 46 This example shows how to create an empty scene and add a VolumeSource 47 and a Camera. 48 49 >>> import yt 50 >>> from yt.visualization.volume_rendering.api import ( 51 ... Camera, Scene, create_volume_source) 52 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 53 >>> sc = Scene() 54 >>> source = create_volume_source(ds.all_data(), "density") 55 >>> sc.add_source(source) 56 >>> cam = sc.add_camera() 57 >>> im = sc.render() 58 59 Alternatively, you can use the create_scene function to set up defaults 60 and then modify the Scene later: 61 62 >>> import yt 63 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 64 65 >>> sc = yt.create_scene(ds) 66 >>> # Modify camera, sources, etc... 67 >>> im = sc.render() 68 69 """ 70 71 _current = None 72 _camera = None 73 _unit_registry = None 74 75 def __init__(self): 76 r"""Create a new Scene instance""" 77 super().__init__() 78 self.sources = OrderedDict() 79 self._last_render = None 80 # A non-public attribute used to get around the fact that we can't 81 # pass kwargs into _repr_png_() 82 self._sigma_clip = None 83 84 def get_source(self, source_num=0): 85 """Returns the volume rendering source indexed by ``source_num``""" 86 return list(self.sources.values())[source_num] 87 88 def __getitem__(self, item): 89 if item in self.sources: 90 return self.sources[item] 91 return self.get_source(item) 92 93 @property 94 def opaque_sources(self): 95 """ 96 Iterate over opaque RenderSource objects, 97 returning a tuple of (key, source) 98 """ 99 for k, source in self.sources.items(): 100 if isinstance(source, OpaqueSource) or issubclass( 101 OpaqueSource, type(source) 102 ): 103 yield k, source 104 105 @property 106 def transparent_sources(self): 107 """ 108 Iterate over transparent RenderSource objects, 109 returning a tuple of (key, source) 110 """ 111 for k, source in self.sources.items(): 112 if not isinstance(source, OpaqueSource): 113 yield k, source 114 115 def add_source(self, render_source, keyname=None): 116 """Add a render source to the scene. 117 118 This will autodetect the type of source. 119 120 Parameters 121 ---------- 122 render_source: 123 :class:`yt.visualization.volume_rendering.render_source.RenderSource` 124 A source to contribute to the volume rendering scene. 125 126 keyname: string (optional) 127 The dictionary key used to reference the source in the sources 128 dictionary. 129 """ 130 if keyname is None: 131 keyname = "source_%02i" % len(self.sources) 132 133 data_sources = (VolumeSource, MeshSource, GridSource) 134 135 if isinstance(render_source, data_sources): 136 self._set_new_unit_registry(render_source.data_source.ds.unit_registry) 137 138 line_annotation_sources = (GridSource, BoxSource, CoordinateVectorSource) 139 140 if isinstance(render_source, line_annotation_sources): 141 lens_str = str(self.camera.lens) 142 if "fisheye" in lens_str or "spherical" in lens_str: 143 raise NotImplementedError( 144 "Line annotation sources are not supported for %s." 145 % (type(self.camera.lens).__name__), 146 ) 147 148 if isinstance(render_source, (LineSource, PointSource)): 149 if isinstance(render_source.positions, YTArray): 150 render_source.positions = ( 151 self.arr(render_source.positions).in_units("code_length").d 152 ) 153 154 self.sources[keyname] = render_source 155 156 return self 157 158 def __setitem__(self, key, value): 159 return self.add_source(value, key) 160 161 def _set_new_unit_registry(self, input_registry): 162 self.unit_registry = UnitRegistry( 163 add_default_symbols=False, lut=input_registry.lut 164 ) 165 166 # Validate that the new unit registry makes sense 167 current_scaling = self.unit_registry["unitary"][0] 168 if current_scaling != input_registry["unitary"][0]: 169 for source in self.sources.items(): 170 data_source = getattr(source, "data_source", None) 171 if data_source is None: 172 continue 173 scaling = data_source.ds.unit_registry["unitary"][0] 174 if scaling != current_scaling: 175 raise NotImplementedError( 176 "Simultaneously rendering data from datasets with " 177 "different units is not supported" 178 ) 179 180 def render(self, camera=None): 181 r"""Render all sources in the Scene. 182 183 Use the current state of the Scene object to render all sources 184 currently in the scene. Returns the image array. If you want to 185 save the output to a file, call the save() function. 186 187 Parameters 188 ---------- 189 camera: :class:`Camera`, optional 190 If specified, use a different :class:`Camera` to render the scene. 191 192 Returns 193 ------- 194 A :class:`yt.data_objects.image_array.ImageArray` instance containing 195 the current rendering image. 196 197 Examples 198 -------- 199 200 >>> import yt 201 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 202 203 >>> sc = yt.create_scene(ds) 204 >>> # Modify camera, sources, etc... 205 >>> im = sc.render() 206 >>> sc.save(sigma_clip=4.0, render=False) 207 208 Altneratively, if you do not need the image array, you can just call 209 ``save`` as follows. 210 211 >>> import yt 212 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 213 214 >>> sc = yt.create_scene(ds) 215 >>> # Modify camera, sources, etc... 216 >>> sc.save(sigma_clip=4.0) 217 218 """ 219 mylog.info("Rendering scene (Can take a while).") 220 if camera is None: 221 camera = self.camera 222 assert camera is not None 223 self._validate() 224 bmp = self.composite(camera=camera) 225 self._last_render = bmp 226 return bmp 227 228 def _render_on_demand(self, render): 229 # checks for existing render before rendering, in most cases we want to 230 # render every time, but in some cases pulling the previous render is 231 # desirable (e.g., if only changing sigma_clip or 232 # saving after a call to sc.show()). 233 234 if self._last_render is not None and not render: 235 mylog.info("Found previously rendered image to save.") 236 return 237 238 if self._last_render is None: 239 mylog.warning("No previously rendered image found, rendering now.") 240 elif render: 241 mylog.warning( 242 "Previously rendered image exists, but rendering anyway. " 243 "Supply 'render=False' to save previously rendered image directly." 244 ) 245 self.render() 246 247 def _get_render_sources(self): 248 return [s for s in self.sources.values() if isinstance(s, RenderSource)] 249 250 def _setup_save(self, fname, render): 251 252 self._render_on_demand(render) 253 254 rensources = self._get_render_sources() 255 if fname is None: 256 # if a volume source present, use its affiliated ds for fname 257 if len(rensources) > 0: 258 rs = rensources[0] 259 basename = rs.data_source.ds.basename 260 if isinstance(rs.field, str): 261 field = rs.field 262 else: 263 field = rs.field[-1] 264 fname = f"{basename}_Render_{field}" 265 # if no volume source present, use a default filename 266 else: 267 fname = "Render_opaque" 268 269 fname = validate_image_name(fname) 270 mylog.info("Saving rendered image to %s", fname) 271 return fname 272 273 def save(self, fname=None, sigma_clip=None, render=True): 274 r"""Saves a rendered image of the Scene to disk. 275 276 Once you have created a scene, this saves an image array to disk with 277 an optional filename. This function calls render() to generate an 278 image array, unless the render parameter is set to False, in which case 279 the most recently rendered scene is used if it exists. 280 281 Parameters 282 ---------- 283 fname: string, optional 284 If specified, save the rendering as to the file "fname". 285 If unspecified, it creates a default based on the dataset filename. 286 The file format is inferred from the filename's suffix. Supported 287 fomats are png, pdf, eps, and ps. 288 Default: None 289 sigma_clip: float, optional 290 Image values greater than this number times the standard deviation 291 plus the mean of the image will be clipped before saving. Useful 292 for enhancing images as it gets rid of rare high pixel values. 293 Default: None 294 295 floor(vals > std_dev*sigma_clip + mean) 296 render: boolean, optional 297 If True, will always render the scene before saving. 298 If False, will use results of previous render if it exists. 299 Default: True 300 301 Returns 302 ------- 303 Nothing 304 305 Examples 306 -------- 307 308 >>> import yt 309 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 310 311 >>> sc = yt.create_scene(ds) 312 >>> # Modify camera, sources, etc... 313 >>> sc.save("test.png", sigma_clip=4) 314 315 When saving multiple images without modifying the scene (camera, 316 sources,etc.), render=False can be used to avoid re-rendering. 317 This is useful for generating images at a range of sigma_clip values: 318 319 >>> import yt 320 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 321 322 >>> sc = yt.create_scene(ds) 323 >>> # save with different sigma clipping values 324 >>> sc.save("raw.png") # The initial render call happens here 325 >>> sc.save("clipped_2.png", sigma_clip=2, render=False) 326 >>> sc.save("clipped_4.png", sigma_clip=4, render=False) 327 328 """ 329 fname = self._setup_save(fname, render) 330 331 # We can render pngs natively but for other formats we defer to 332 # matplotlib. 333 if fname.endswith(".png"): 334 self._last_render.write_png(fname, sigma_clip=sigma_clip) 335 else: 336 from matplotlib.figure import Figure 337 338 shape = self._last_render.shape 339 fig = Figure((shape[0] / 100.0, shape[1] / 100.0)) 340 canvas = get_canvas(fig, fname) 341 342 ax = fig.add_axes([0, 0, 1, 1]) 343 ax.set_axis_off() 344 out = self._last_render 345 nz = out[:, :, :3][out[:, :, :3].nonzero()] 346 max_val = nz.mean() + sigma_clip * nz.std() 347 alpha = 255 * out[:, :, 3].astype("uint8") 348 out = np.clip(out[:, :, :3] / max_val, 0.0, 1.0) * 255 349 out = np.concatenate([out.astype("uint8"), alpha[..., None]], axis=-1) 350 # not sure why we need rot90, but this makes the orientation 351 # match the png writer 352 ax.imshow(np.rot90(out), origin="lower") 353 canvas.print_figure(fname, dpi=100) 354 355 def save_annotated( 356 self, 357 fname=None, 358 label_fmt=None, 359 text_annotate=None, 360 dpi=100, 361 sigma_clip=None, 362 render=True, 363 ): 364 r"""Saves the most recently rendered image of the Scene to disk, 365 including an image of the transfer function and and user-defined 366 text. 367 368 Once you have created a scene and rendered that scene to an image 369 array, this saves that image array to disk with an optional filename. 370 If an image has not yet been rendered for the current scene object, 371 it forces one and writes it out. 372 373 Parameters 374 ---------- 375 fname: string, optional 376 If specified, save the rendering as a bitmap to the file "fname". 377 If unspecified, it creates a default based on the dataset filename. 378 Default: None 379 sigma_clip: float, optional 380 Image values greater than this number times the standard deviation 381 plus the mean of the image will be clipped before saving. Useful 382 for enhancing images as it gets rid of rare high pixel values. 383 Default: None 384 385 floor(vals > std_dev*sigma_clip + mean) 386 dpi: integer, optional 387 By default, the resulting image will be the same size as the camera 388 parameters. If you supply a dpi, then the image will be scaled 389 accordingly (from the default 100 dpi) 390 label_fmt : str, optional 391 A format specifier (e.g., label_fmt="%.2g") to use in formatting 392 the data values that label the transfer function colorbar. 393 text_annotate : list of iterables 394 Any text that you wish to display on the image. This should be an 395 list containing a tuple of coordinates (in normalized figure 396 coordinates), the text to display, and, optionally, a dictionary of 397 keyword/value pairs to pass through to the matplotlib text() 398 function. 399 400 Each item in the main list is a separate string to write. 401 render: boolean, optional 402 If True, will render the scene before saving. 403 If False, will use results of previous render if it exists. 404 Default: True 405 406 Returns 407 ------- 408 Nothing 409 410 411 Examples 412 -------- 413 414 >>> sc.save_annotated( 415 ... "fig.png", 416 ... text_annotate=[ 417 ... [ 418 ... (0.05, 0.05), 419 ... f"t = {ds.current_time.d}", 420 ... dict(horizontalalignment="left"), 421 ... ], 422 ... [ 423 ... (0.5, 0.95), 424 ... "simulation title", 425 ... dict(color="y", fontsize="24", horizontalalignment="center"), 426 ... ], 427 ... ], 428 ... ) 429 430 """ 431 fname = self._setup_save(fname, render) 432 433 # which transfer function? 434 rs = self._get_render_sources()[0] 435 tf = rs.transfer_function 436 label = rs.data_source.ds._get_field_info(rs.field).get_label() 437 438 ax = self._show_mpl( 439 self._last_render.swapaxes(0, 1), sigma_clip=sigma_clip, dpi=dpi 440 ) 441 self._annotate(ax.axes, tf, rs, label=label, label_fmt=label_fmt) 442 443 # any text? 444 if text_annotate is not None: 445 f = self._render_figure 446 for t in text_annotate: 447 xy = t[0] 448 string = t[1] 449 if len(t) == 3: 450 opt = t[2] 451 else: 452 opt = dict() 453 454 # sane default 455 if "color" not in opt: 456 opt["color"] = "w" 457 458 ax.axes.text(xy[0], xy[1], string, transform=f.transFigure, **opt) 459 460 self._render_figure.canvas = get_canvas(self._render_figure, fname) 461 self._render_figure.tight_layout() 462 self._render_figure.savefig(fname, facecolor="black", pad_inches=0) 463 464 def _show_mpl(self, im, sigma_clip=None, dpi=100): 465 from matplotlib.figure import Figure 466 467 s = im.shape 468 self._render_figure = Figure(figsize=(s[1] / float(dpi), s[0] / float(dpi))) 469 self._render_figure.clf() 470 ax = self._render_figure.add_subplot(111) 471 ax.set_position([0, 0, 1, 1]) 472 473 if sigma_clip is not None: 474 nz = im[im > 0.0] 475 nim = im / (nz.mean() + sigma_clip * np.std(nz)) 476 nim[nim > 1.0] = 1.0 477 nim[nim < 0.0] = 0.0 478 del nz 479 else: 480 nim = im 481 axim = ax.imshow(nim[:, :, :3] / nim[:, :, :3].max(), interpolation="bilinear") 482 483 return axim 484 485 def _annotate(self, ax, tf, source, label="", label_fmt=None): 486 ax.get_xaxis().set_visible(False) 487 ax.get_xaxis().set_ticks([]) 488 ax.get_yaxis().set_visible(False) 489 ax.get_yaxis().set_ticks([]) 490 cb = self._render_figure.colorbar( 491 ax.images[0], pad=0.0, fraction=0.05, drawedges=True 492 ) 493 tf.vert_cbar( 494 ax=cb.ax, 495 label=label, 496 label_fmt=label_fmt, 497 resolution=self.camera.resolution[0], 498 log_scale=source.log_field, 499 ) 500 501 def _validate(self): 502 r"""Validate the current state of the scene.""" 503 504 for source in self.sources.values(): 505 source._validate() 506 return 507 508 def composite(self, camera=None): 509 r"""Create a composite image of the current scene. 510 511 First iterate over the opaque sources and set the ZBuffer. 512 Then iterate over the transparent sources, rendering from the value 513 of the zbuffer to the front of the box. Typically this function is 514 accessed through the .render() command. 515 516 Parameters 517 ---------- 518 camera: :class:`Camera`, optional 519 If specified, use a specific :class:`Camera` to render the scene. 520 521 Returns 522 ------- 523 im: :class:`ImageArray` 524 ImageArray instance of the current rendering image. 525 526 Examples 527 -------- 528 529 >>> import yt 530 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 531 532 >>> sc = yt.create_scene(ds) 533 >>> # Modify camera, sources, etc... 534 >>> im = sc.composite() 535 536 """ 537 if camera is None: 538 camera = self.camera 539 empty = camera.lens.new_image(camera) 540 opaque = ZBuffer(empty, np.full(empty.shape[:2], np.inf)) 541 542 for _, source in self.opaque_sources: 543 source.render(camera, zbuffer=opaque) 544 im = source.zbuffer.rgba 545 546 for _, source in self.transparent_sources: 547 im = source.render(camera, zbuffer=opaque) 548 opaque.rgba = im 549 550 # rotate image 180 degrees so orientation agrees with e.g. 551 # a PlotWindow plot 552 return np.rot90(im, k=2) 553 554 def add_camera(self, data_source=None, lens_type="plane-parallel", auto=False): 555 r"""Add a new camera to the Scene. 556 557 The camera is defined by a position (the location of the camera 558 in the simulation domain,), a focus (the point at which the 559 camera is pointed), a width (the width of the snapshot that will 560 be taken, a resolution (the number of pixels in the image), and 561 a north_vector (the "up" direction in the resulting image). A 562 camera can use a variety of different Lens objects. 563 564 If the scene already has a camera associated with it, this function 565 will create a new camera and discard the old one. 566 567 Parameters 568 ---------- 569 data_source: :class:`AMR3DData` or :class:`Dataset`, optional 570 This is the source to be rendered, which can be any arbitrary yt 571 data object or dataset. 572 lens_type: string, optional 573 This specifies the type of lens to use for rendering. Current 574 options are 'plane-parallel', 'perspective', and 'fisheye'. See 575 :class:`yt.visualization.volume_rendering.lens.Lens` for details. 576 Default: 'plane-parallel' 577 auto: boolean 578 If True, build smart defaults using the data source extent. This 579 can be time-consuming to iterate over the entire dataset to find 580 the positional bounds. Default: False 581 582 Examples 583 -------- 584 585 In this example, the camera is set using defaults that are chosen 586 to be reasonable for the argument Dataset. 587 588 >>> import yt 589 >>> from yt.visualization.volume_rendering.api import Camera, Scene 590 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 591 >>> sc = Scene() 592 >>> sc.add_camera() 593 594 Here, we set the camera properties manually: 595 596 >>> import yt 597 >>> from yt.visualization.volume_rendering.api import Camera, Scene 598 >>> sc = Scene() 599 >>> cam = sc.add_camera() 600 >>> cam.position = np.array([0.5, 0.5, -1.0]) 601 >>> cam.focus = np.array([0.5, 0.5, 0.0]) 602 >>> cam.north_vector = np.array([1.0, 0.0, 0.0]) 603 604 Finally, we create a camera with a non-default lens: 605 606 >>> import yt 607 >>> from yt.visualization.volume_rendering.api import Camera 608 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 609 >>> sc = Scene() 610 >>> sc.add_camera(ds, lens_type="perspective") 611 612 """ 613 self._camera = Camera(self, data_source, lens_type, auto) 614 return self.camera 615 616 def camera(): 617 doc = r"""The camera property. 618 619 This is the default camera that will be used when rendering. Can be set 620 manually, but Camera type will be checked for validity. 621 """ 622 623 def fget(self): 624 return self._camera 625 626 def fset(self, value): 627 value.width = self.arr(value.width) 628 value.focus = self.arr(value.focus) 629 value.position = self.arr(value.position) 630 self._camera = value 631 632 def fdel(self): 633 del self._camera 634 self._camera = None 635 636 return locals() 637 638 camera = property(**camera()) 639 640 def unit_registry(): 641 def fget(self): 642 ur = self._unit_registry 643 if ur is None: 644 ur = UnitRegistry() 645 # This will be updated when we add a volume source 646 ur.add("unitary", 1.0, length) 647 self._unit_registry = ur 648 return self._unit_registry 649 650 def fset(self, value): 651 self._unit_registry = value 652 if self.camera is not None: 653 self.camera.width = YTArray( 654 self.camera.width.in_units("unitary"), registry=value 655 ) 656 self.camera.focus = YTArray( 657 self.camera.focus.in_units("unitary"), registry=value 658 ) 659 self.camera.position = YTArray( 660 self.camera.position.in_units("unitary"), registry=value 661 ) 662 663 def fdel(self): 664 del self._unit_registry 665 self._unit_registry = None 666 667 return locals() 668 669 unit_registry = property(**unit_registry()) 670 671 def set_camera(self, camera): 672 r""" 673 674 Set the camera to be used by this scene. 675 676 """ 677 self.camera = camera 678 679 def get_camera(self): 680 r""" 681 682 Get the camera currently used by this scene. 683 684 """ 685 return self.camera 686 687 def annotate_domain(self, ds, color=None): 688 r""" 689 690 Modifies this scene by drawing the edges of the computational domain. 691 This adds a new BoxSource to the scene corresponding to the domain 692 boundaries and returns the modified scene object. 693 694 Parameters 695 ---------- 696 697 ds : :class:`yt.data_objects.static_output.Dataset` 698 This is the dataset object corresponding to the 699 simulation being rendered. Used to get the domain bounds. 700 color : array_like of shape (4,), optional 701 The RGBA value to use to draw the domain boundaries. 702 Default is black with an alpha of 1.0. 703 704 Examples 705 -------- 706 707 >>> import yt 708 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 709 710 >>> sc = yt.create_scene(ds) 711 >>> sc.annotate_domain(ds) 712 >>> im = sc.render() 713 714 """ 715 box_source = BoxSource(ds.domain_left_edge, ds.domain_right_edge, color=color) 716 self.add_source(box_source) 717 return self 718 719 def annotate_grids( 720 self, data_source, alpha=0.3, cmap=None, min_level=None, max_level=None 721 ): 722 r""" 723 724 Modifies this scene by drawing the edges of the AMR grids. 725 This adds a new GridSource to the scene that represents the AMR grid 726 and returns the resulting Scene object. 727 728 Parameters 729 ---------- 730 731 data_source: :class:`~yt.data_objects.api.DataContainer` 732 The data container that will be used to identify grids to draw. 733 alpha : float 734 The opacity of the grids to draw. 735 cmap : color map name 736 The color map to use to map resolution levels to color. 737 min_level : int, optional 738 Minimum level to draw 739 max_level : int, optional 740 Maximum level to draw 741 742 743 Examples 744 -------- 745 746 >>> import yt 747 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 748 749 >>> sc = yt.create_scene(ds) 750 >>> sc.annotate_grids(ds.all_data()) 751 >>> im = sc.render() 752 753 """ 754 if cmap is None: 755 cmap = ytcfg.get("yt", "default_colormap") 756 grids = GridSource( 757 data_source, 758 alpha=alpha, 759 cmap=cmap, 760 min_level=min_level, 761 max_level=max_level, 762 ) 763 self.add_source(grids) 764 return self 765 766 def annotate_mesh_lines(self, color=None, alpha=1.0): 767 """ 768 769 Modifies this Scene by drawing the mesh line boundaries 770 on all MeshSources. 771 772 Parameters 773 ---------- 774 color : array_like of shape (4,), optional 775 The RGBA value to use to draw the mesh lines. 776 Default is black with an alpha of 1.0. 777 alpha : float, optional 778 The opacity of the mesh lines. Default is 255 (solid). 779 780 """ 781 for _, source in self.opaque_sources: 782 if isinstance(source, MeshSource): 783 source.annotate_mesh_lines(color=color, alpha=alpha) 784 return self 785 786 def annotate_axes(self, colors=None, alpha=1.0): 787 r""" 788 789 Modifies this scene by drawing the coordinate axes. 790 This adds a new CoordinateVectorSource to the scene 791 and returns the modified scene object. 792 793 Parameters 794 ---------- 795 colors: array-like of shape (3,4), optional 796 The RGBA values to use to draw the x, y, and z vectors. The default 797 is [[1, 0, 0, alpha], [0, 1, 0, alpha], [0, 0, 1, alpha]] where 798 ``alpha`` is set by the parameter below. If ``colors`` is set then 799 ``alpha`` is ignored. 800 alpha : float, optional 801 The opacity of the vectors. 802 803 Examples 804 -------- 805 806 >>> import yt 807 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 808 809 >>> sc = yt.create_scene(ds) 810 >>> sc.annotate_axes(alpha=0.5) 811 >>> im = sc.render() 812 813 """ 814 coords = CoordinateVectorSource(colors, alpha) 815 self.add_source(coords) 816 return self 817 818 def show(self, sigma_clip=None): 819 r"""This will send the most recently rendered image to the IPython 820 notebook. 821 822 If yt is being run from within an IPython session, and it is able to 823 determine this, this function will send the current image of this Scene 824 to the notebook for display. If there is no current image, it will 825 run the render() method on this Scene before sending the result to the 826 notebook. 827 828 If yt can't determine if it's inside an IPython session, this will raise 829 YTNotInsideNotebook. 830 831 Examples 832 -------- 833 834 >>> import yt 835 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 836 837 >>> sc = yt.create_scene(ds) 838 >>> sc.show() 839 840 """ 841 if "__IPYTHON__" in dir(builtins): 842 from IPython.display import display 843 844 self._sigma_clip = sigma_clip 845 display(self) 846 else: 847 raise YTNotInsideNotebook 848 849 _arr = None 850 851 @property 852 def arr(self): 853 """Converts an array into a :class:`yt.units.yt_array.YTArray` 854 855 The returned YTArray will be dimensionless by default, but can be 856 cast to arbitrary units using the ``units`` keyword argument. 857 858 Parameters 859 ---------- 860 861 input_array : Iterable 862 A tuple, list, or array to attach units to 863 units: String unit specification, unit symbol object, or astropy 864 units object 865 input_units : deprecated in favor of 'units' 866 The units of the array. Powers must be specified using python syntax 867 (cm**3, not cm^3). 868 dtype : string or NumPy dtype object 869 The dtype of the returned array data 870 871 Examples 872 -------- 873 874 >>> a = sc.arr([1, 2, 3], "cm") 875 >>> b = sc.arr([4, 5, 6], "m") 876 >>> a + b 877 YTArray([ 401., 502., 603.]) cm 878 >>> b + a 879 YTArray([ 4.01, 5.02, 6.03]) m 880 881 Arrays returned by this function know about the scene's unit system 882 883 >>> a = sc.arr(np.ones(5), "unitary") 884 >>> a.in_units("Mpc") 885 YTArray([ 1.00010449, 1.00010449, 1.00010449, 1.00010449, 886 1.00010449]) Mpc 887 888 """ 889 if self._arr is not None: 890 return self._arr 891 self._arr = functools.partial(YTArray, registry=self.unit_registry) 892 return self._arr 893 894 _quan = None 895 896 @property 897 def quan(self): 898 """Converts an scalar into a :class:`yt.units.yt_array.YTQuantity` 899 900 The returned YTQuantity will be dimensionless by default, but can be 901 cast to arbitrary units using the ``units`` keyword argument. 902 903 Parameters 904 ---------- 905 906 input_scalar : an integer or floating point scalar 907 The scalar to attach units to 908 units : String unit specification, unit symbol object, or astropy 909 units 910 input_units : deprecated in favor of 'units' 911 The units of the quantity. Powers must be specified using python 912 syntax (cm**3, not cm^3). 913 dtype : string or NumPy dtype object 914 The dtype of the array data. 915 916 Examples 917 -------- 918 919 >>> a = sc.quan(1, "cm") 920 >>> b = sc.quan(2, "m") 921 >>> a + b 922 201.0 cm 923 >>> b + a 924 2.01 m 925 926 Quantities created this way automatically know about the unit system 927 of the scene 928 929 >>> a = ds.quan(5, "unitary") 930 >>> a.in_cgs() 931 1.543e+25 cm 932 933 """ 934 if self._quan is not None: 935 return self._quan 936 self._quan = functools.partial(YTQuantity, registry=self.unit_registry) 937 return self._quan 938 939 def _repr_png_(self): 940 if self._last_render is None: 941 self.render() 942 png = self._last_render.write_png( 943 filename=None, sigma_clip=self._sigma_clip, background="black" 944 ) 945 self._sigma_clip = None 946 return png 947 948 def __repr__(self): 949 disp = "<Scene Object>:" 950 disp += "\nSources: \n" 951 for k, v in self.sources.items(): 952 disp += f" {k}: {v}\n" 953 disp += "Camera: \n" 954 disp += f" {self.camera}" 955 return disp 956