1from collections.abc import Iterable, MutableSequence 2import copy 3from functools import partial, reduce 4from itertools import product 5from numbers import Integral, Real 6import operator 7from pathlib import Path 8from xml.etree import ElementTree as ET 9 10import h5py 11import numpy as np 12import pandas as pd 13import scipy.sparse as sps 14 15import openmc 16import openmc.checkvalue as cv 17from ._xml import clean_indentation, reorder_attributes 18from .mixin import IDManagerMixin 19 20 21# The tally arithmetic product types. The tensor product performs the full 22# cross product of the data in two tallies with respect to a specified axis 23# (filters, nuclides, or scores). The entrywise product performs the arithmetic 24# operation entrywise across the entries in two tallies with respect to a 25# specified axis. 26_PRODUCT_TYPES = ['tensor', 'entrywise'] 27 28# The following indicate acceptable types when setting Tally.scores, 29# Tally.nuclides, and Tally.filters 30_SCORE_CLASSES = (str, openmc.CrossScore, openmc.AggregateScore) 31_NUCLIDE_CLASSES = (str, openmc.CrossNuclide, openmc.AggregateNuclide) 32_FILTER_CLASSES = (openmc.Filter, openmc.CrossFilter, openmc.AggregateFilter) 33 34# Valid types of estimators 35ESTIMATOR_TYPES = ['tracklength', 'collision', 'analog'] 36 37 38class Tally(IDManagerMixin): 39 """A tally defined by a set of scores that are accumulated for a list of 40 nuclides given a set of filters. 41 42 Parameters 43 ---------- 44 tally_id : int, optional 45 Unique identifier for the tally. If none is specified, an identifier 46 will automatically be assigned 47 name : str, optional 48 Name of the tally. If not specified, the name is the empty string. 49 50 Attributes 51 ---------- 52 id : int 53 Unique identifier for the tally 54 name : str 55 Name of the tally 56 filters : list of openmc.Filter 57 List of specified filters for the tally 58 nuclides : list of openmc.Nuclide 59 List of nuclides to score results for 60 scores : list of str 61 List of defined scores, e.g. 'flux', 'fission', etc. 62 estimator : {'analog', 'tracklength', 'collision'} 63 Type of estimator for the tally 64 triggers : list of openmc.Trigger 65 List of tally triggers 66 num_scores : int 67 Total number of scores 68 num_filter_bins : int 69 Total number of filter bins accounting for all filters 70 num_bins : int 71 Total number of bins for the tally 72 shape : 3-tuple of int 73 The shape of the tally data array ordered as the number of filter bins, 74 nuclide bins and score bins 75 filter_strides : list of int 76 Stride in memory for each filter 77 num_realizations : int 78 Total number of realizations 79 with_summary : bool 80 Whether or not a Summary has been linked 81 sum : numpy.ndarray 82 An array containing the sum of each independent realization for each bin 83 sum_sq : numpy.ndarray 84 An array containing the sum of each independent realization squared for 85 each bin 86 mean : numpy.ndarray 87 An array containing the sample mean for each bin 88 std_dev : numpy.ndarray 89 An array containing the sample standard deviation for each bin 90 derived : bool 91 Whether or not the tally is derived from one or more other tallies 92 sparse : bool 93 Whether or not the tally uses SciPy's LIL sparse matrix format for 94 compressed data storage 95 derivative : openmc.TallyDerivative 96 A material perturbation derivative to apply to all scores in the tally. 97 98 """ 99 100 next_id = 1 101 used_ids = set() 102 103 def __init__(self, tally_id=None, name=''): 104 # Initialize Tally class attributes 105 self.id = tally_id 106 self.name = name 107 self._filters = cv.CheckedList(_FILTER_CLASSES, 'tally filters') 108 self._nuclides = cv.CheckedList(_NUCLIDE_CLASSES, 'tally nuclides') 109 self._scores = cv.CheckedList(_SCORE_CLASSES, 'tally scores') 110 self._estimator = None 111 self._triggers = cv.CheckedList(openmc.Trigger, 'tally triggers') 112 self._derivative = None 113 114 self._num_realizations = 0 115 self._with_summary = False 116 117 self._sum = None 118 self._sum_sq = None 119 self._mean = None 120 self._std_dev = None 121 self._with_batch_statistics = False 122 self._derived = False 123 self._sparse = False 124 125 self._sp_filename = None 126 self._results_read = False 127 128 def __repr__(self): 129 parts = ['Tally'] 130 parts.append('{: <15}=\t{}'.format('ID', self.id)) 131 parts.append('{: <15}=\t{}'.format('Name', self.name)) 132 if self.derivative is not None: 133 parts.append('{: <15}=\t{}'.format('Derivative ID', self.derivative.id)) 134 filters = ', '.join(type(f).__name__ for f in self.filters) 135 parts.append('{: <15}=\t{}'.format('Filters', filters)) 136 nuclides = ' '.join(str(nuclide) for nuclide in self.nuclides) 137 parts.append('{: <15}=\t{}'.format('Nuclides', nuclides)) 138 parts.append('{: <15}=\t{}'.format('Scores', self.scores)) 139 parts.append('{: <15}=\t{}'.format('Estimator', self.estimator)) 140 return '\n\t'.join(parts) 141 142 @property 143 def name(self): 144 return self._name 145 146 @property 147 def filters(self): 148 return self._filters 149 150 @property 151 def nuclides(self): 152 return self._nuclides 153 154 @property 155 def num_nuclides(self): 156 return len(self._nuclides) 157 158 @property 159 def scores(self): 160 return self._scores 161 162 @property 163 def num_scores(self): 164 return len(self._scores) 165 166 @property 167 def num_filters(self): 168 return len(self.filters) 169 170 @property 171 def num_filter_bins(self): 172 return reduce(operator.mul, (f.num_bins for f in self.filters), 1) 173 174 @property 175 def num_bins(self): 176 return self.num_filter_bins * self.num_nuclides * self.num_scores 177 178 @property 179 def shape(self): 180 return (self.num_filter_bins, self.num_nuclides, self.num_scores) 181 182 @property 183 def estimator(self): 184 return self._estimator 185 186 @property 187 def triggers(self): 188 return self._triggers 189 190 @property 191 def num_realizations(self): 192 return self._num_realizations 193 194 @property 195 def with_summary(self): 196 return self._with_summary 197 198 def _read_results(self): 199 if self._results_read: 200 return 201 202 # Open the HDF5 statepoint file 203 with h5py.File(self._sp_filename, 'r') as f: 204 # Extract Tally data from the file 205 data = f['tallies/tally {}/results'.format(self.id)] 206 sum_ = data[:, :, 0] 207 sum_sq = data[:, :, 1] 208 209 # Reshape the results arrays 210 sum_ = np.reshape(sum_, self.shape) 211 sum_sq = np.reshape(sum_sq, self.shape) 212 213 # Set the data for this Tally 214 self._sum = sum_ 215 self._sum_sq = sum_sq 216 217 # Convert NumPy arrays to SciPy sparse LIL matrices 218 if self.sparse: 219 self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape) 220 self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(), self._sum_sq.shape) 221 222 # Indicate that Tally results have been read 223 self._results_read = True 224 225 @property 226 def sum(self): 227 if not self._sp_filename or self.derived: 228 return None 229 230 # Make sure results have been read 231 self._read_results() 232 233 if self.sparse: 234 return np.reshape(self._sum.toarray(), self.shape) 235 else: 236 return self._sum 237 238 @property 239 def sum_sq(self): 240 if not self._sp_filename or self.derived: 241 return None 242 243 # Make sure results have been read 244 self._read_results() 245 246 if self.sparse: 247 return np.reshape(self._sum_sq.toarray(), self.shape) 248 else: 249 return self._sum_sq 250 251 @property 252 def mean(self): 253 if self._mean is None: 254 if not self._sp_filename: 255 return None 256 257 self._mean = self.sum / self.num_realizations 258 259 # Convert NumPy array to SciPy sparse LIL matrix 260 if self.sparse: 261 self._mean = sps.lil_matrix(self._mean.flatten(), 262 self._mean.shape) 263 264 if self.sparse: 265 return np.reshape(self._mean.toarray(), self.shape) 266 else: 267 return self._mean 268 269 @property 270 def std_dev(self): 271 if self._std_dev is None: 272 if not self._sp_filename: 273 return None 274 275 n = self.num_realizations 276 nonzero = np.abs(self.mean) > 0 277 self._std_dev = np.zeros_like(self.mean) 278 self._std_dev[nonzero] = np.sqrt((self.sum_sq[nonzero]/n - 279 self.mean[nonzero]**2)/(n - 1)) 280 281 # Convert NumPy array to SciPy sparse LIL matrix 282 if self.sparse: 283 self._std_dev = sps.lil_matrix(self._std_dev.flatten(), 284 self._std_dev.shape) 285 286 self.with_batch_statistics = True 287 288 if self.sparse: 289 return np.reshape(self._std_dev.toarray(), self.shape) 290 else: 291 return self._std_dev 292 293 @property 294 def with_batch_statistics(self): 295 return self._with_batch_statistics 296 297 @property 298 def derived(self): 299 return self._derived 300 301 @property 302 def derivative(self): 303 return self._derivative 304 305 @property 306 def sparse(self): 307 return self._sparse 308 309 @estimator.setter 310 def estimator(self, estimator): 311 cv.check_value('estimator', estimator, ESTIMATOR_TYPES) 312 self._estimator = estimator 313 314 @triggers.setter 315 def triggers(self, triggers): 316 cv.check_type('tally triggers', triggers, MutableSequence) 317 self._triggers = cv.CheckedList(openmc.Trigger, 'tally triggers', 318 triggers) 319 320 @name.setter 321 def name(self, name): 322 cv.check_type('tally name', name, str, none_ok=True) 323 self._name = name 324 325 @derivative.setter 326 def derivative(self, deriv): 327 cv.check_type('tally derivative', deriv, openmc.TallyDerivative, 328 none_ok=True) 329 self._derivative = deriv 330 331 @filters.setter 332 def filters(self, filters): 333 cv.check_type('tally filters', filters, MutableSequence) 334 335 # If the filter is already in the Tally, raise an error 336 visited_filters = set() 337 for f in filters: 338 if f in visited_filters: 339 msg = 'Unable to add a duplicate filter "{}" to Tally ID="{}" ' \ 340 'since duplicate filters are not supported in the OpenMC ' \ 341 'Python API'.format(f, self.id) 342 raise ValueError(msg) 343 visited_filters.add(f) 344 345 self._filters = cv.CheckedList(_FILTER_CLASSES, 'tally filters', filters) 346 347 @nuclides.setter 348 def nuclides(self, nuclides): 349 cv.check_type('tally nuclides', nuclides, MutableSequence) 350 351 # If the nuclide is already in the Tally, raise an error 352 visited_nuclides = set() 353 for nuc in nuclides: 354 if nuc in visited_nuclides: 355 msg = 'Unable to add a duplicate nuclide "{}" to Tally ID="{}" ' \ 356 'since duplicate nuclides are not supported in the OpenMC ' \ 357 'Python API'.format(nuc, self.id) 358 raise ValueError(msg) 359 visited_nuclides.add(nuc) 360 361 self._nuclides = cv.CheckedList(_NUCLIDE_CLASSES, 'tally nuclides', 362 nuclides) 363 364 @scores.setter 365 def scores(self, scores): 366 cv.check_type('tally scores', scores, MutableSequence) 367 368 visited_scores = set() 369 for i, score in enumerate(scores): 370 # If the score is already in the Tally, raise an error 371 if score in visited_scores: 372 msg = 'Unable to add a duplicate score "{}" to Tally ID="{}" ' \ 373 'since duplicate scores are not supported in the OpenMC ' \ 374 'Python API'.format(score, self.id) 375 raise ValueError(msg) 376 visited_scores.add(score) 377 378 # If score is a string, strip whitespace 379 if isinstance(score, str): 380 # Check to see if scores are deprecated before storing 381 for deprecated in ['scatter-', 'nu-scatter-', 'scatter-p', 382 'nu-scatter-p', 'scatter-y', 'nu-scatter-y', 383 'flux-y', 'total-y']: 384 if score.strip().startswith(deprecated): 385 msg = score.strip() + ' is no longer supported.' 386 raise ValueError(msg) 387 scores[i] = score.strip() 388 389 self._scores = cv.CheckedList(_SCORE_CLASSES, 'tally scores', scores) 390 391 @num_realizations.setter 392 def num_realizations(self, num_realizations): 393 cv.check_type('number of realizations', num_realizations, Integral) 394 cv.check_greater_than('number of realizations', num_realizations, 0, True) 395 self._num_realizations = num_realizations 396 397 @with_summary.setter 398 def with_summary(self, with_summary): 399 cv.check_type('with_summary', with_summary, bool) 400 self._with_summary = with_summary 401 402 @with_batch_statistics.setter 403 def with_batch_statistics(self, with_batch_statistics): 404 cv.check_type('with_batch_statistics', with_batch_statistics, bool) 405 self._with_batch_statistics = with_batch_statistics 406 407 @sum.setter 408 def sum(self, sum): 409 cv.check_type('sum', sum, Iterable) 410 self._sum = sum 411 412 @sum_sq.setter 413 def sum_sq(self, sum_sq): 414 cv.check_type('sum_sq', sum_sq, Iterable) 415 self._sum_sq = sum_sq 416 417 @sparse.setter 418 def sparse(self, sparse): 419 """Convert tally data from NumPy arrays to SciPy list of lists (LIL) 420 sparse matrices, and vice versa. 421 422 This property may be used to reduce the amount of data in memory during 423 tally data processing. The tally data will be stored as SciPy LIL 424 matrices internally within the Tally object. All tally data access 425 properties and methods will return data as a dense NumPy array. 426 427 """ 428 429 cv.check_type('sparse', sparse, bool) 430 431 # Convert NumPy arrays to SciPy sparse LIL matrices 432 if sparse and not self.sparse: 433 if self._sum is not None: 434 self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape) 435 if self._sum_sq is not None: 436 self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(), 437 self._sum_sq.shape) 438 if self._mean is not None: 439 self._mean = sps.lil_matrix(self._mean.flatten(), 440 self._mean.shape) 441 if self._std_dev is not None: 442 self._std_dev = sps.lil_matrix(self._std_dev.flatten(), 443 self._std_dev.shape) 444 445 self._sparse = True 446 447 # Convert SciPy sparse LIL matrices to NumPy arrays 448 elif not sparse and self.sparse: 449 if self._sum is not None: 450 self._sum = np.reshape(self._sum.toarray(), self.shape) 451 if self._sum_sq is not None: 452 self._sum_sq = np.reshape(self._sum_sq.toarray(), self.shape) 453 if self._mean is not None: 454 self._mean = np.reshape(self._mean.toarray(), self.shape) 455 if self._std_dev is not None: 456 self._std_dev = np.reshape(self._std_dev.toarray(), self.shape) 457 self._sparse = False 458 459 def remove_score(self, score): 460 """Remove a score from the tally 461 462 Parameters 463 ---------- 464 score : str 465 Score to remove 466 467 """ 468 469 if score not in self.scores: 470 msg = 'Unable to remove score "{}" from Tally ID="{}" since ' \ 471 'the Tally does not contain this score'.format(score, self.id) 472 raise ValueError(msg) 473 474 self._scores.remove(score) 475 476 def remove_filter(self, old_filter): 477 """Remove a filter from the tally 478 479 Parameters 480 ---------- 481 old_filter : openmc.Filter 482 Filter to remove 483 484 """ 485 486 if old_filter not in self.filters: 487 msg = 'Unable to remove filter "{}" from Tally ID="{}" since the ' \ 488 'Tally does not contain this filter'.format(old_filter, self.id) 489 raise ValueError(msg) 490 491 self._filters.remove(old_filter) 492 493 def remove_nuclide(self, nuclide): 494 """Remove a nuclide from the tally 495 496 Parameters 497 ---------- 498 nuclide : openmc.Nuclide 499 Nuclide to remove 500 501 """ 502 503 if nuclide not in self.nuclides: 504 msg = 'Unable to remove nuclide "{}" from Tally ID="{}" since the ' \ 505 'Tally does not contain this nuclide'.format(nuclide, self.id) 506 raise ValueError(msg) 507 508 self._nuclides.remove(nuclide) 509 510 def _can_merge_filters(self, other): 511 """Determine if another tally's filters can be merged with this one's 512 513 The types of filters between the two tallies must match identically. 514 The bins in all of the filters must match identically, or be mergeable 515 in only one filter. This is a helper method for the can_merge(...) 516 and merge(...) methods. 517 518 Parameters 519 ---------- 520 other : openmc.Tally 521 Tally to check for mergeable filters 522 523 """ 524 525 # Two tallies must have the same number of filters 526 if len(self.filters) != len(other.filters): 527 return False 528 529 # Return False if only one tally has a delayed group filter 530 tally1_dg = self.contains_filter(openmc.DelayedGroupFilter) 531 tally2_dg = other.contains_filter(openmc.DelayedGroupFilter) 532 if tally1_dg != tally2_dg: 533 return False 534 535 # Look to see if all filters are the same, or one or more can be merged 536 for filter1 in self.filters: 537 mergeable = False 538 539 for filter2 in other.filters: 540 if filter1 == filter2 or filter1.can_merge(filter2): 541 mergeable = True 542 break 543 544 # If no mergeable filter was found, the tallies are not mergeable 545 if not mergeable: 546 return False 547 548 # Tally filters are mergeable if all conditional checks passed 549 return True 550 551 def _can_merge_nuclides(self, other): 552 """Determine if another tally's nuclides can be merged with this one's 553 554 The nuclides between the two tallies must be mutually exclusive or 555 identically matching. This is a helper method for the can_merge(...) 556 and merge(...) methods. 557 558 Parameters 559 ---------- 560 other : openmc.Tally 561 Tally to check for mergeable nuclides 562 563 """ 564 565 no_nuclides_match = True 566 all_nuclides_match = True 567 568 # Search for each of this tally's nuclides in the other tally 569 for nuclide in self.nuclides: 570 if nuclide not in other.nuclides: 571 all_nuclides_match = False 572 else: 573 no_nuclides_match = False 574 575 # Search for each of the other tally's nuclides in this tally 576 for nuclide in other.nuclides: 577 if nuclide not in self.nuclides: 578 all_nuclides_match = False 579 else: 580 no_nuclides_match = False 581 582 # Either all nuclides should match, or none should 583 return no_nuclides_match or all_nuclides_match 584 585 def _can_merge_scores(self, other): 586 """Determine if another tally's scores can be merged with this one's 587 588 The scores between the two tallies must be mutually exclusive or 589 identically matching. This is a helper method for the can_merge(...) 590 and merge(...) methods. 591 592 Parameters 593 ---------- 594 other : openmc.Tally 595 Tally to check for mergeable scores 596 597 """ 598 599 no_scores_match = True 600 all_scores_match = True 601 602 # Search for each of this tally's scores in the other tally 603 for score in self.scores: 604 if score in other.scores: 605 no_scores_match = False 606 607 # Search for each of the other tally's scores in this tally 608 for score in other.scores: 609 if score not in self.scores: 610 all_scores_match = False 611 else: 612 no_scores_match = False 613 614 if score == 'current' and score not in self.scores: 615 return False 616 617 # Nuclides cannot be specified on 'flux' scores 618 if 'flux' in self.scores or 'flux' in other.scores: 619 if self.nuclides != other.nuclides: 620 return False 621 622 # Either all scores should match, or none should 623 return no_scores_match or all_scores_match 624 625 def can_merge(self, other): 626 """Determine if another tally can be merged with this one 627 628 If results have been loaded from a statepoint, then tallies are only 629 mergeable along one and only one of filter bins, nuclides or scores. 630 631 Parameters 632 ---------- 633 other : openmc.Tally 634 Tally to check for merging 635 636 """ 637 638 if not isinstance(other, Tally): 639 return False 640 641 # Must have same estimator 642 if self.estimator != other.estimator: 643 return False 644 645 equal_filters = sorted(self.filters) == sorted(other.filters) 646 equal_nuclides = sorted(self.nuclides) == sorted(other.nuclides) 647 equal_scores = sorted(self.scores) == sorted(other.scores) 648 equality = [equal_filters, equal_nuclides, equal_scores] 649 650 # If all filters, nuclides and scores match then tallies are mergeable 651 if all(equality): 652 return True 653 654 # Variables to indicate filter bins, nuclides, and scores that can be merged 655 can_merge_filters = self._can_merge_filters(other) 656 can_merge_nuclides = self._can_merge_nuclides(other) 657 can_merge_scores = self._can_merge_scores(other) 658 mergeability = [can_merge_filters, can_merge_nuclides, can_merge_scores] 659 660 if not all(mergeability): 661 return False 662 663 # If the tally results have been read from the statepoint, at least two 664 # of filters, nuclides and scores must match 665 else: 666 return not self._results_read or sum(equality) >= 2 667 668 def merge(self, other): 669 """Merge another tally with this one 670 671 If results have been loaded from a statepoint, then tallies are only 672 mergeable along one and only one of filter bins, nuclides or scores. 673 674 Parameters 675 ---------- 676 other : openmc.Tally 677 Tally to merge with this one 678 679 Returns 680 ------- 681 merged_tally : openmc.Tally 682 Merged tallies 683 684 """ 685 686 if not self.can_merge(other): 687 msg = 'Unable to merge tally ID="{}" with "{}"'.format( 688 other.id, self.id) 689 raise ValueError(msg) 690 691 # Create deep copy of tally to return as merged tally 692 merged_tally = copy.deepcopy(self) 693 694 # Differentiate Tally with a new auto-generated Tally ID 695 merged_tally.id = None 696 697 # Create deep copy of other tally to use for array concatenation 698 other_copy = copy.deepcopy(other) 699 700 # Identify if filters, nuclides and scores are mergeable and/or equal 701 merge_filters = self._can_merge_filters(other) 702 merge_nuclides = self._can_merge_nuclides(other) 703 merge_scores = self._can_merge_scores(other) 704 equal_filters = sorted(self.filters) == sorted(other.filters) 705 equal_nuclides = sorted(self.nuclides) == sorted(other.nuclides) 706 equal_scores = sorted(self.scores) == sorted(other.scores) 707 708 # If two tallies can be merged along a filter's bins 709 if merge_filters and not equal_filters: 710 711 # Search for mergeable filters 712 for i, filter1 in enumerate(self.filters): 713 for filter2 in other.filters: 714 if filter1 != filter2 and filter1.can_merge(filter2): 715 other_copy._swap_filters(other_copy.filters[i], filter2) 716 merged_tally.filters[i] = filter1.merge(filter2) 717 join_right = filter1 < filter2 718 merge_axis = i 719 break 720 721 # If two tallies can be merged along nuclide bins 722 if merge_nuclides and not equal_nuclides: 723 merge_axis = self.num_filters 724 join_right = True 725 726 # Add unique nuclides from other tally to merged tally 727 for nuclide in other.nuclides: 728 if nuclide not in merged_tally.nuclides: 729 merged_tally.nuclides.append(nuclide) 730 731 # If two tallies can be merged along score bins 732 if merge_scores and not equal_scores: 733 merge_axis = self.num_filters + 1 734 join_right = True 735 736 # Add unique scores from other tally to merged tally 737 for score in other.scores: 738 if score not in merged_tally.scores: 739 merged_tally.scores.append(score) 740 741 # Add triggers from other tally to merged tally 742 for trigger in other.triggers: 743 merged_tally.triggers.append(trigger) 744 745 # If results have not been read, then return tally for input generation 746 if self._results_read is None: 747 return merged_tally 748 # Otherwise, this is a derived tally which needs merged results arrays 749 else: 750 self._derived = True 751 752 # Concatenate sum arrays if present in both tallies 753 if self.sum is not None and other_copy.sum is not None: 754 self_sum = self.get_reshaped_data(value='sum') 755 other_sum = other_copy.get_reshaped_data(value='sum') 756 757 if join_right: 758 merged_sum = np.concatenate((self_sum, other_sum), 759 axis=merge_axis) 760 else: 761 merged_sum = np.concatenate((other_sum, self_sum), 762 axis=merge_axis) 763 764 merged_tally._sum = np.reshape(merged_sum, merged_tally.shape) 765 766 # Concatenate sum_sq arrays if present in both tallies 767 if self.sum_sq is not None and other.sum_sq is not None: 768 self_sum_sq = self.get_reshaped_data(value='sum_sq') 769 other_sum_sq = other_copy.get_reshaped_data(value='sum_sq') 770 771 if join_right: 772 merged_sum_sq = np.concatenate((self_sum_sq, other_sum_sq), 773 axis=merge_axis) 774 else: 775 merged_sum_sq = np.concatenate((other_sum_sq, self_sum_sq), 776 axis=merge_axis) 777 778 merged_tally._sum_sq = np.reshape(merged_sum_sq, merged_tally.shape) 779 780 # Concatenate mean arrays if present in both tallies 781 if self.mean is not None and other.mean is not None: 782 self_mean = self.get_reshaped_data(value='mean') 783 other_mean = other_copy.get_reshaped_data(value='mean') 784 785 if join_right: 786 merged_mean = np.concatenate((self_mean, other_mean), 787 axis=merge_axis) 788 else: 789 merged_mean = np.concatenate((other_mean, self_mean), 790 axis=merge_axis) 791 792 merged_tally._mean = np.reshape(merged_mean, merged_tally.shape) 793 794 # Concatenate std. dev. arrays if present in both tallies 795 if self.std_dev is not None and other.std_dev is not None: 796 self_std_dev = self.get_reshaped_data(value='std_dev') 797 other_std_dev = other_copy.get_reshaped_data(value='std_dev') 798 799 if join_right: 800 merged_std_dev = np.concatenate((self_std_dev, other_std_dev), 801 axis=merge_axis) 802 else: 803 merged_std_dev = np.concatenate((other_std_dev, self_std_dev), 804 axis=merge_axis) 805 806 merged_tally._std_dev = np.reshape(merged_std_dev, merged_tally.shape) 807 808 # Sparsify merged tally if both tallies are sparse 809 merged_tally.sparse = self.sparse and other.sparse 810 811 return merged_tally 812 813 def to_xml_element(self): 814 """Return XML representation of the tally 815 816 Returns 817 ------- 818 element : xml.etree.ElementTree.Element 819 XML element containing tally data 820 821 """ 822 823 element = ET.Element("tally") 824 825 # Tally ID 826 element.set("id", str(self.id)) 827 828 # Optional Tally name 829 if self.name != '': 830 element.set("name", self.name) 831 832 # Optional Tally filters 833 if len(self.filters) > 0: 834 subelement = ET.SubElement(element, "filters") 835 subelement.text = ' '.join(str(f.id) for f in self.filters) 836 837 # Optional Nuclides 838 if self.nuclides: 839 subelement = ET.SubElement(element, "nuclides") 840 subelement.text = ' '.join(str(n) for n in self.nuclides) 841 842 # Scores 843 if len(self.scores) == 0: 844 msg = 'Unable to get XML for Tally ID="{}" since it does not ' \ 845 'contain any scores'.format(self.id) 846 raise ValueError(msg) 847 848 else: 849 scores = '' 850 for score in self.scores: 851 scores += '{} '.format(score) 852 853 subelement = ET.SubElement(element, "scores") 854 subelement.text = scores.rstrip(' ') 855 856 # Tally estimator type 857 if self.estimator is not None: 858 subelement = ET.SubElement(element, "estimator") 859 subelement.text = self.estimator 860 861 # Optional Triggers 862 for trigger in self.triggers: 863 trigger.get_trigger_xml(element) 864 865 # Optional derivatives 866 if self.derivative is not None: 867 subelement = ET.SubElement(element, "derivative") 868 subelement.text = str(self.derivative.id) 869 870 return element 871 872 def contains_filter(self, filter_type): 873 """Looks for a filter in the tally that matches a specified type 874 875 Parameters 876 ---------- 877 filter_type : openmc.FilterMeta 878 Type of the filter, e.g. MeshFilter 879 880 Returns 881 ------- 882 filter_found : bool 883 True if the tally contains a filter of the requested type; 884 otherwise false 885 886 """ 887 for test_filter in self.filters: 888 if type(test_filter) is filter_type: 889 return True 890 return False 891 892 def find_filter(self, filter_type): 893 """Return a filter in the tally that matches a specified type 894 895 Parameters 896 ---------- 897 filter_type : openmc.FilterMeta 898 Type of the filter, e.g. MeshFilter 899 900 Returns 901 ------- 902 filter_found : openmc.Filter 903 Filter from this tally with matching type, or None if no matching 904 Filter is found 905 906 Raises 907 ------ 908 ValueError 909 If no matching Filter is found 910 911 """ 912 913 # Look through all of this Tally's Filters for the type requested 914 for test_filter in self.filters: 915 if type(test_filter) is filter_type: 916 return test_filter 917 918 # Also check to see if the desired filter is wrapped up in an 919 # aggregate 920 elif isinstance(test_filter, openmc.AggregateFilter): 921 if isinstance(test_filter.aggregate_filter, filter_type): 922 return test_filter 923 924 # If we did not find the Filter, throw an Exception 925 msg = 'Unable to find filter type "{}" in Tally ID="{}"'.format( 926 filter_type, self.id) 927 raise ValueError(msg) 928 929 def get_nuclide_index(self, nuclide): 930 """Returns the index in the Tally's results array for a Nuclide bin 931 932 Parameters 933 ---------- 934 nuclide : str 935 The name of the Nuclide (e.g., 'H1', 'U238') 936 937 Returns 938 ------- 939 nuclide_index : int 940 The index in the Tally data array for this nuclide. 941 942 Raises 943 ------ 944 KeyError 945 When the argument passed to the 'nuclide' parameter cannot be found 946 in the Tally. 947 948 """ 949 # Look for the user-requested nuclide in all of the Tally's Nuclides 950 for i, test_nuclide in enumerate(self.nuclides): 951 # If the Summary was linked, then values are Nuclide objects 952 if isinstance(test_nuclide, openmc.Nuclide): 953 if test_nuclide.name == nuclide: 954 return i 955 956 # If the Summary has not been linked, then values are ZAIDs 957 else: 958 if test_nuclide == nuclide: 959 return i 960 961 msg = ('Unable to get the nuclide index for Tally since "{}" ' 962 'is not one of the nuclides'.format(nuclide)) 963 raise KeyError(msg) 964 965 def get_score_index(self, score): 966 """Returns the index in the Tally's results array for a score bin 967 968 Parameters 969 ---------- 970 score : str 971 The score string (e.g., 'absorption', 'nu-fission') 972 973 Returns 974 ------- 975 score_index : int 976 The index in the Tally data array for this score. 977 978 Raises 979 ------ 980 ValueError 981 When the argument passed to the 'score' parameter cannot be found in 982 the Tally. 983 984 """ 985 986 try: 987 score_index = self.scores.index(score) 988 989 except ValueError: 990 msg = 'Unable to get the score index for Tally since "{}" ' \ 991 'is not one of the scores'.format(score) 992 raise ValueError(msg) 993 994 return score_index 995 996 def get_filter_indices(self, filters=[], filter_bins=[]): 997 """Get indices into the filter axis of this tally's data arrays. 998 999 This is a helper method for the Tally.get_values(...) method to 1000 extract tally data. This method returns the indices into the filter 1001 axis of the tally's data array (axis=0) for particular combinations 1002 of filters and their corresponding bins. 1003 1004 Parameters 1005 ---------- 1006 filters : Iterable of openmc.FilterMeta 1007 An iterable of filter types 1008 (e.g., [MeshFilter, EnergyFilter]; default is []) 1009 filter_bins : Iterable of tuple 1010 A list of tuples of filter bins corresponding to the filter_types 1011 parameter (e.g., [(1,), ((0., 0.625e-6),)]; default is []). Each 1012 tuple contains bins for the corresponding filter type in the filters 1013 parameter. Each bin is an integer ID for Material-, Surface-, 1014 Cell-, Cellborn-, and Universe- Filters. Each bin is an integer 1015 for the cell instance ID for DistribcellFilters. Each bin is a 1016 2-tuple of floats for Energy- and Energyout- Filters corresponding 1017 to the energy boundaries of the bin of interest. The bin is an 1018 (x,y,z) 3-tuple for MeshFilters corresponding to the mesh cell 1019 of interest. The order of the bins in the list must correspond to 1020 the filter_types parameter. 1021 1022 Returns 1023 ------- 1024 numpy.ndarray 1025 A NumPy array of the filter indices 1026 1027 """ 1028 1029 cv.check_type('filters', filters, Iterable, openmc.FilterMeta) 1030 cv.check_type('filter_bins', filter_bins, Iterable, tuple) 1031 1032 # If user did not specify any specific Filters, use them all 1033 if not filters: 1034 return np.arange(self.num_filter_bins) 1035 1036 # Initialize empty list of indices for each bin in each Filter 1037 filter_indices = [] 1038 1039 # Loop over all of the Tally's Filters 1040 for i, self_filter in enumerate(self.filters): 1041 # If a user-requested Filter, get the user-requested bins 1042 for j, test_filter in enumerate(filters): 1043 if type(self_filter) is test_filter: 1044 bins = filter_bins[j] 1045 break 1046 else: 1047 # If not a user-requested Filter, get all bins 1048 if isinstance(self_filter, openmc.DistribcellFilter): 1049 # Create list of cell instance IDs for distribcell Filters 1050 bins = list(range(self_filter.num_bins)) 1051 1052 elif isinstance(self_filter, openmc.EnergyFunctionFilter): 1053 # EnergyFunctionFilters don't have bins so just add a None 1054 bins = [None] 1055 1056 else: 1057 # Create list of IDs for bins for all other filter types 1058 bins = self_filter.bins 1059 1060 # Add indices for each bin in this Filter to the list 1061 indices = np.array([self_filter.get_bin_index(b) for b in bins]) 1062 filter_indices.append(indices) 1063 1064 # Account for stride in each of the previous filters 1065 for indices in filter_indices[:i]: 1066 indices *= self_filter.num_bins 1067 1068 # Apply outer product sum between all filter bin indices 1069 return list(map(sum, product(*filter_indices))) 1070 1071 def get_nuclide_indices(self, nuclides): 1072 """Get indices into the nuclide axis of this tally's data arrays. 1073 1074 This is a helper method for the Tally.get_values(...) method to 1075 extract tally data. This method returns the indices into the nuclide 1076 axis of the tally's data array (axis=1) for one or more nuclides. 1077 1078 Parameters 1079 ---------- 1080 nuclides : list of str 1081 A list of nuclide name strings 1082 (e.g., ['U235', 'U238']; default is []) 1083 1084 Returns 1085 ------- 1086 numpy.ndarray 1087 A NumPy array of the nuclide indices 1088 1089 """ 1090 1091 cv.check_iterable_type('nuclides', nuclides, str) 1092 1093 # If user did not specify any specific Nuclides, use them all 1094 if not nuclides: 1095 return np.arange(self.num_nuclides) 1096 1097 # Determine the score indices from any of the requested scores 1098 nuclide_indices = np.zeros_like(nuclides, dtype=int) 1099 for i, nuclide in enumerate(nuclides): 1100 nuclide_indices[i] = self.get_nuclide_index(nuclide) 1101 return nuclide_indices 1102 1103 def get_score_indices(self, scores): 1104 """Get indices into the score axis of this tally's data arrays. 1105 1106 This is a helper method for the Tally.get_values(...) method to 1107 extract tally data. This method returns the indices into the score 1108 axis of the tally's data array (axis=2) for one or more scores. 1109 1110 Parameters 1111 ---------- 1112 scores : list of str or openmc.CrossScore 1113 A list of one or more score strings 1114 (e.g., ['absorption', 'nu-fission']; default is []) 1115 1116 Returns 1117 ------- 1118 numpy.ndarray 1119 A NumPy array of the score indices 1120 1121 """ 1122 1123 for score in scores: 1124 if not isinstance(score, (str, openmc.CrossScore)): 1125 msg = 'Unable to get score indices for score "{}" in Tally ' \ 1126 'ID="{}" since it is not a string or CrossScore'\ 1127 .format(score, self.id) 1128 raise ValueError(msg) 1129 1130 # Determine the score indices from any of the requested scores 1131 if scores: 1132 score_indices = np.zeros(len(scores), dtype=int) 1133 for i, score in enumerate(scores): 1134 score_indices[i] = self.get_score_index(score) 1135 1136 # If user did not specify any specific scores, use them all 1137 else: 1138 score_indices = np.arange(self.num_scores) 1139 1140 return score_indices 1141 1142 def get_values(self, scores=[], filters=[], filter_bins=[], 1143 nuclides=[], value='mean'): 1144 """Returns one or more tallied values given a list of scores, filters, 1145 filter bins and nuclides. 1146 1147 This method constructs a 3D NumPy array for the requested Tally data 1148 indexed by filter bin, nuclide bin, and score index. The method will 1149 order the data in the array as specified in the parameter lists. 1150 1151 Parameters 1152 ---------- 1153 scores : list of str 1154 A list of one or more score strings 1155 (e.g., ['absorption', 'nu-fission']; default is []) 1156 filters : Iterable of openmc.FilterMeta 1157 An iterable of filter types 1158 (e.g., [MeshFilter, EnergyFilter]; default is []) 1159 filter_bins : list of Iterables 1160 A list of tuples of filter bins corresponding to the filter_types 1161 parameter (e.g., [(1,), ((0., 0.625e-6),)]; default is []). Each 1162 tuple contains bins for the corresponding filter type in the filters 1163 parameter. Each bins is the integer ID for 'material', 'surface', 1164 'cell', 'cellborn', and 'universe' Filters. Each bin is an integer 1165 for the cell instance ID for 'distribcell' Filters. Each bin is a 1166 2-tuple of floats for 'energy' and 'energyout' filters corresponding 1167 to the energy boundaries of the bin of interest. The bin is an 1168 (x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell 1169 of interest. The order of the bins in the list must correspond to 1170 the filter_types parameter. 1171 nuclides : list of str 1172 A list of nuclide name strings 1173 (e.g., ['U235', 'U238']; default is []) 1174 value : str 1175 A string for the type of value to return - 'mean' (default), 1176 'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted 1177 1178 Returns 1179 ------- 1180 float or numpy.ndarray 1181 A scalar or NumPy array of the Tally data indexed in the order 1182 each filter, nuclide and score is listed in the parameters. 1183 1184 Raises 1185 ------ 1186 ValueError 1187 When this method is called before the Tally is populated with data, 1188 or the input parameters do not correspond to the Tally's attributes, 1189 e.g., if the score(s) do not match those in the Tally. 1190 1191 """ 1192 1193 # Ensure that the tally has data 1194 if (value == 'mean' and self.mean is None) or \ 1195 (value == 'std_dev' and self.std_dev is None) or \ 1196 (value == 'rel_err' and self.mean is None) or \ 1197 (value == 'sum' and self.sum is None) or \ 1198 (value == 'sum_sq' and self.sum_sq is None): 1199 msg = 'The Tally ID="{}" has no data to return'.format(self.id) 1200 raise ValueError(msg) 1201 1202 # Get filter, nuclide and score indices 1203 filter_indices = self.get_filter_indices(filters, filter_bins) 1204 nuclide_indices = self.get_nuclide_indices(nuclides) 1205 score_indices = self.get_score_indices(scores) 1206 1207 # Construct outer product of all three index types with each other 1208 indices = np.ix_(filter_indices, nuclide_indices, score_indices) 1209 1210 # Return the desired result from Tally 1211 if value == 'mean': 1212 data = self.mean[indices] 1213 elif value == 'std_dev': 1214 data = self.std_dev[indices] 1215 elif value == 'rel_err': 1216 data = self.std_dev[indices] / self.mean[indices] 1217 elif value == 'sum': 1218 data = self.sum[indices] 1219 elif value == 'sum_sq': 1220 data = self.sum_sq[indices] 1221 else: 1222 msg = 'Unable to return results from Tally ID="{}" since the ' \ 1223 'the requested value "{}" is not \'mean\', \'std_dev\', ' \ 1224 '\'rel_err\', \'sum\', or \'sum_sq\''.format(self.id, value) 1225 raise LookupError(msg) 1226 1227 return data 1228 1229 def get_pandas_dataframe(self, filters=True, nuclides=True, scores=True, 1230 derivative=True, paths=True, float_format='{:.2e}'): 1231 """Build a Pandas DataFrame for the Tally data. 1232 1233 This method constructs a Pandas DataFrame object for the Tally data 1234 with columns annotated by filter, nuclide and score bin information. 1235 1236 This capability has been tested for Pandas >=0.13.1. However, it is 1237 recommended to use v0.16 or newer versions of Pandas since this method 1238 uses the Multi-index Pandas feature. 1239 1240 Parameters 1241 ---------- 1242 filters : bool 1243 Include columns with filter bin information (default is True). 1244 nuclides : bool 1245 Include columns with nuclide bin information (default is True). 1246 scores : bool 1247 Include columns with score bin information (default is True). 1248 derivative : bool 1249 Include columns with differential tally info (default is True). 1250 paths : bool, optional 1251 Construct columns for distribcell tally filters (default is True). 1252 The geometric information in the Summary object is embedded into a 1253 Multi-index column with a geometric "path" to each distribcell 1254 instance. 1255 float_format : str 1256 All floats in the DataFrame will be formatted using the given 1257 format string before printing. 1258 1259 Returns 1260 ------- 1261 pandas.DataFrame 1262 A Pandas DataFrame with each column annotated by filter, nuclide and 1263 score bin information (if these parameters are True), and the mean 1264 and standard deviation of the Tally's data. 1265 1266 Raises 1267 ------ 1268 KeyError 1269 When this method is called before the Tally is populated with data 1270 1271 """ 1272 1273 # Ensure that the tally has data 1274 if self.mean is None or self.std_dev is None: 1275 msg = 'The Tally ID="{}" has no data to return'.format(self.id) 1276 raise KeyError(msg) 1277 1278 # Initialize a pandas dataframe for the tally data 1279 df = pd.DataFrame() 1280 1281 # Find the total length of the tally data array 1282 data_size = self.mean.size 1283 1284 # Build DataFrame columns for filters if user requested them 1285 if filters: 1286 # Append each Filter's DataFrame to the overall DataFrame 1287 for f, stride in zip(self.filters, self.filter_strides): 1288 filter_df = f.get_pandas_dataframe( 1289 data_size, stride, paths=paths) 1290 df = pd.concat([df, filter_df], axis=1) 1291 1292 # Include DataFrame column for nuclides if user requested it 1293 if nuclides: 1294 nuclides = [] 1295 column_name = 'nuclide' 1296 1297 for nuclide in self.nuclides: 1298 if isinstance(nuclide, openmc.Nuclide): 1299 nuclides.append(nuclide.name) 1300 elif isinstance(nuclide, openmc.AggregateNuclide): 1301 nuclides.append(nuclide.name) 1302 column_name = '{}(nuclide)'.format(nuclide.aggregate_op) 1303 else: 1304 nuclides.append(nuclide) 1305 1306 # Tile the nuclide bins into a DataFrame column 1307 nuclides = np.repeat(nuclides, len(self.scores)) 1308 tile_factor = data_size / len(nuclides) 1309 df[column_name] = np.tile(nuclides, int(tile_factor)) 1310 1311 # Include column for scores if user requested it 1312 if scores: 1313 scores = [] 1314 column_name = 'score' 1315 1316 for score in self.scores: 1317 if isinstance(score, (str, openmc.CrossScore)): 1318 scores.append(str(score)) 1319 elif isinstance(score, openmc.AggregateScore): 1320 scores.append(score.name) 1321 column_name = '{}(score)'.format(score.aggregate_op) 1322 1323 tile_factor = data_size / len(self.scores) 1324 df[column_name] = np.tile(scores, int(tile_factor)) 1325 1326 # Include columns for derivatives if user requested it 1327 if derivative and (self.derivative is not None): 1328 df['d_variable'] = self.derivative.variable 1329 if self.derivative.material is not None: 1330 df['d_material'] = self.derivative.material 1331 if self.derivative.nuclide is not None: 1332 df['d_nuclide'] = self.derivative.nuclide 1333 1334 # Append columns with mean, std. dev. for each tally bin 1335 df['mean'] = self.mean.ravel() 1336 df['std. dev.'] = self.std_dev.ravel() 1337 1338 df = df.dropna(axis=1) 1339 1340 # Expand the columns into Pandas MultiIndices for readability 1341 if pd.__version__ >= '0.16': 1342 columns = copy.deepcopy(df.columns.values) 1343 1344 # Convert all elements in columns list to tuples 1345 for i, column in enumerate(columns): 1346 if not isinstance(column, tuple): 1347 columns[i] = (column,) 1348 1349 # Make each tuple the same length 1350 max_len_column = len(max(columns, key=len)) 1351 for i, column in enumerate(columns): 1352 delta_len = max_len_column - len(column) 1353 if delta_len > 0: 1354 new_column = list(column) 1355 new_column.extend(['']*delta_len) 1356 columns[i] = tuple(new_column) 1357 1358 # Create and set a MultiIndex for the DataFrame's columns, but only 1359 # if any column actually is multi-level (e.g., a mesh filter) 1360 if any(len(c) > 1 for c in columns): 1361 df.columns = pd.MultiIndex.from_tuples(columns) 1362 1363 # Modify the df.to_string method so that it prints formatted strings. 1364 # Credit to http://stackoverflow.com/users/3657742/chrisb for this trick 1365 df.to_string = partial(df.to_string, float_format=float_format.format) 1366 1367 return df 1368 1369 def get_reshaped_data(self, value='mean'): 1370 """Returns an array of tally data with one dimension per filter. 1371 1372 The tally data in OpenMC is stored as a 3D array with the dimensions 1373 corresponding to filters, nuclides and scores. As a result, tally data 1374 can be opaque for a user to directly index (i.e., without use of 1375 :meth:`openmc.Tally.get_values`) since one must know how to properly use 1376 the number of bins and strides for each filter to index into the first 1377 (filter) dimension. 1378 1379 This builds and returns a reshaped version of the tally data array with 1380 unique dimensions corresponding to each tally filter. For example, 1381 suppose this tally has arrays of data with shape (8,5,5) corresponding 1382 to two filters (2 and 4 bins, respectively), five nuclides and five 1383 scores. This method will return a version of the data array with the 1384 with a new shape of (2,4,5,5) such that the first two dimensions 1385 correspond directly to the two filters with two and four bins. 1386 1387 Parameters 1388 ---------- 1389 value : str 1390 A string for the type of value to return - 'mean' (default), 1391 'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted 1392 1393 Returns 1394 ------- 1395 numpy.ndarray 1396 The tally data array indexed by filters, nuclides and scores. 1397 1398 """ 1399 1400 # Get the 3D array of data in filters, nuclides and scores 1401 data = self.get_values(value=value) 1402 1403 # Build a new array shape with one dimension per filter 1404 new_shape = tuple(f.num_bins for f in self.filters) 1405 new_shape += (self.num_nuclides, self.num_scores) 1406 1407 # Reshape the data with one dimension for each filter 1408 data = np.reshape(data, new_shape) 1409 return data 1410 1411 def hybrid_product(self, other, binary_op, filter_product=None, 1412 nuclide_product=None, score_product=None): 1413 """Combines filters, scores and nuclides with another tally. 1414 1415 This is a helper method for the tally arithmetic operator overloaded 1416 methods. It is called a "hybrid product" because it performs a 1417 combination of tensor (or Kronecker) and entrywise (or Hadamard) 1418 products. The filters from both tallies are combined using an entrywise 1419 (or Hadamard) product on matching filters. By default, if all nuclides 1420 are identical in the two tallies, the entrywise product is performed 1421 across nuclides; else the tensor product is performed. By default, if 1422 all scores are identical in the two tallies, the entrywise product is 1423 performed across scores; else the tensor product is performed. Users 1424 can also call the method explicitly and specify the desired product. 1425 1426 Parameters 1427 ---------- 1428 other : openmc.Tally 1429 The tally on the right hand side of the hybrid product 1430 binary_op : {'+', '-', '*', '/', '^'} 1431 The binary operation in the hybrid product 1432 filter_product : {'tensor', 'entrywise' or None} 1433 The type of product (tensor or entrywise) to be performed between 1434 filter data. The default is the entrywise product. Currently only 1435 the entrywise product is supported since a tally cannot contain 1436 two of the same filter. 1437 nuclide_product : {'tensor', 'entrywise' or None} 1438 The type of product (tensor or entrywise) to be performed between 1439 nuclide data. The default is the entrywise product if all nuclides 1440 between the two tallies are the same; otherwise the default is 1441 the tensor product. 1442 score_product : {'tensor', 'entrywise' or None} 1443 The type of product (tensor or entrywise) to be performed between 1444 score data. The default is the entrywise product if all scores 1445 between the two tallies are the same; otherwise the default is 1446 the tensor product. 1447 1448 Returns 1449 ------- 1450 openmc.Tally 1451 A new Tally that is the hybrid product with this one. 1452 1453 Raises 1454 ------ 1455 ValueError 1456 When this method is called before the other tally is populated 1457 with data. 1458 1459 """ 1460 1461 # Set default value for filter product if it was not set 1462 if filter_product is None: 1463 filter_product = 'entrywise' 1464 elif filter_product == 'tensor': 1465 msg = 'Unable to perform Tally arithmetic with a tensor product' \ 1466 'for the filter data as this is not currently supported.' 1467 raise ValueError(msg) 1468 1469 # Set default value for nuclide product if it was not set 1470 if nuclide_product is None: 1471 if self.nuclides == other.nuclides: 1472 nuclide_product = 'entrywise' 1473 else: 1474 nuclide_product = 'tensor' 1475 1476 # Set default value for score product if it was not set 1477 if score_product is None: 1478 if self.scores == other.scores: 1479 score_product = 'entrywise' 1480 else: 1481 score_product = 'tensor' 1482 1483 # Check product types 1484 cv.check_value('filter product', filter_product, _PRODUCT_TYPES) 1485 cv.check_value('nuclide product', nuclide_product, _PRODUCT_TYPES) 1486 cv.check_value('score product', score_product, _PRODUCT_TYPES) 1487 1488 # Check that results have been read 1489 if not other.derived and other.sum is None: 1490 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 1491 'since it does not contain any results.'.format(other.id) 1492 raise ValueError(msg) 1493 1494 new_tally = Tally() 1495 new_tally._derived = True 1496 new_tally.with_batch_statistics = True 1497 new_tally._num_realizations = self.num_realizations 1498 new_tally._estimator = self.estimator 1499 new_tally._with_summary = self.with_summary 1500 new_tally._sp_filename = self._sp_filename 1501 1502 # Construct a combined derived name from the two tally operands 1503 if self.name != '' and other.name != '': 1504 new_name = '({} {} {})'.format(self.name, binary_op, other.name) 1505 new_tally.name = new_name 1506 1507 # Query the mean and std dev so the tally data is read in from file 1508 # if it has not already been read in. 1509 self.mean, self.std_dev, other.mean, other.std_dev 1510 1511 # Create copies of self and other tallies to rearrange for tally 1512 # arithmetic 1513 self_copy = copy.deepcopy(self) 1514 other_copy = copy.deepcopy(other) 1515 1516 self_copy.sparse = False 1517 other_copy.sparse = False 1518 1519 # Align the tally data based on desired hybrid product 1520 data = self_copy._align_tally_data(other_copy, filter_product, 1521 nuclide_product, score_product) 1522 1523 # Perform tally arithmetic operation 1524 if binary_op == '+': 1525 new_tally._mean = data['self']['mean'] + data['other']['mean'] 1526 new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 + 1527 data['other']['std. dev.']**2) 1528 elif binary_op == '-': 1529 new_tally._mean = data['self']['mean'] - data['other']['mean'] 1530 new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 + 1531 data['other']['std. dev.']**2) 1532 elif binary_op == '*': 1533 with np.errstate(divide='ignore', invalid='ignore'): 1534 self_rel_err = data['self']['std. dev.'] / data['self']['mean'] 1535 other_rel_err = data['other']['std. dev.'] / data['other']['mean'] 1536 new_tally._mean = data['self']['mean'] * data['other']['mean'] 1537 new_tally._std_dev = np.abs(new_tally.mean) * \ 1538 np.sqrt(self_rel_err**2 + other_rel_err**2) 1539 elif binary_op == '/': 1540 with np.errstate(divide='ignore', invalid='ignore'): 1541 self_rel_err = data['self']['std. dev.'] / data['self']['mean'] 1542 other_rel_err = data['other']['std. dev.'] / data['other']['mean'] 1543 new_tally._mean = data['self']['mean'] / data['other']['mean'] 1544 new_tally._std_dev = np.abs(new_tally.mean) * \ 1545 np.sqrt(self_rel_err**2 + other_rel_err**2) 1546 elif binary_op == '^': 1547 with np.errstate(divide='ignore', invalid='ignore'): 1548 mean_ratio = data['other']['mean'] / data['self']['mean'] 1549 first_term = mean_ratio * data['self']['std. dev.'] 1550 second_term = \ 1551 np.log(data['self']['mean']) * data['other']['std. dev.'] 1552 new_tally._mean = data['self']['mean'] ** data['other']['mean'] 1553 new_tally._std_dev = np.abs(new_tally.mean) * \ 1554 np.sqrt(first_term**2 + second_term**2) 1555 1556 # Convert any infs and nans to zero 1557 new_tally._mean[np.isinf(new_tally._mean)] = 0 1558 new_tally._mean = np.nan_to_num(new_tally._mean) 1559 new_tally._std_dev[np.isinf(new_tally._std_dev)] = 0 1560 new_tally._std_dev = np.nan_to_num(new_tally._std_dev) 1561 1562 # Set tally attributes 1563 if self_copy.estimator == other_copy.estimator: 1564 new_tally.estimator = self_copy.estimator 1565 if self_copy.with_summary and other_copy.with_summary: 1566 new_tally.with_summary = self_copy.with_summary 1567 if self_copy.num_realizations == other_copy.num_realizations: 1568 new_tally.num_realizations = self_copy.num_realizations 1569 1570 # Add filters to the new tally 1571 if filter_product == 'entrywise': 1572 for self_filter in self_copy.filters: 1573 new_tally.filters.append(self_filter) 1574 else: 1575 all_filters = [self_copy.filters, other_copy.filters] 1576 for self_filter, other_filter in product(*all_filters): 1577 new_filter = openmc.CrossFilter(self_filter, other_filter, 1578 binary_op) 1579 new_tally.filters.append(new_filter) 1580 1581 # Add nuclides to the new tally 1582 if nuclide_product == 'entrywise': 1583 for self_nuclide in self_copy.nuclides: 1584 new_tally.nuclides.append(self_nuclide) 1585 else: 1586 all_nuclides = [self_copy.nuclides, other_copy.nuclides] 1587 for self_nuclide, other_nuclide in product(*all_nuclides): 1588 new_nuclide = openmc.CrossNuclide(self_nuclide, other_nuclide, 1589 binary_op) 1590 new_tally.nuclides.append(new_nuclide) 1591 1592 # Define helper function that handles score units appropriately 1593 # depending on the binary operator 1594 def cross_score(score1, score2, binary_op): 1595 if binary_op == '+' or binary_op == '-': 1596 if score1 == score2: 1597 return score1 1598 else: 1599 return openmc.CrossScore(score1, score2, binary_op) 1600 else: 1601 return openmc.CrossScore(score1, score2, binary_op) 1602 1603 # Add scores to the new tally 1604 if score_product == 'entrywise': 1605 for self_score in self_copy.scores: 1606 new_score = cross_score(self_score, self_score, binary_op) 1607 new_tally.scores.append(new_score) 1608 else: 1609 all_scores = [self_copy.scores, other_copy.scores] 1610 for self_score, other_score in product(*all_scores): 1611 new_score = cross_score(self_score, other_score, binary_op) 1612 new_tally.scores.append(new_score) 1613 1614 return new_tally 1615 1616 @property 1617 def filter_strides(self): 1618 all_strides = [] 1619 stride = self.num_nuclides * self.num_scores 1620 for self_filter in reversed(self.filters): 1621 all_strides.append(stride) 1622 stride *= self_filter.num_bins 1623 return all_strides[::-1] 1624 1625 def _align_tally_data(self, other, filter_product, nuclide_product, 1626 score_product): 1627 """Aligns data from two tallies for tally arithmetic. 1628 1629 This is a helper method to construct a dict of dicts of the "aligned" 1630 data arrays from each tally for tally arithmetic. The method analyzes 1631 the filters, scores and nuclides in both tallies and determines how to 1632 appropriately align the data for vectorized arithmetic. For example, 1633 if the two tallies have different filters, this method will use NumPy 1634 'tile' and 'repeat' operations to the new data arrays such that all 1635 possible combinations of the data in each tally's bins will be made 1636 when the arithmetic operation is applied to the arrays. 1637 1638 Parameters 1639 ---------- 1640 other : openmc.Tally 1641 The tally to outer product with this tally 1642 filter_product : {'entrywise'} 1643 The type of product to be performed between filter data. Currently, 1644 only the entrywise product is supported for the filter product. 1645 nuclide_product : {'tensor', 'entrywise'} 1646 The type of product (tensor or entrywise) to be performed between 1647 nuclide data. 1648 score_product : {'tensor', 'entrywise'} 1649 The type of product (tensor or entrywise) to be performed between 1650 score data. 1651 1652 Returns 1653 ------- 1654 dict 1655 A dictionary of dictionaries to "aligned" 'mean' and 'std. dev' 1656 NumPy arrays for each tally's data. 1657 1658 """ 1659 1660 # Get the set of filters that each tally is missing 1661 other_missing_filters = set(self.filters) - set(other.filters) 1662 self_missing_filters = set(other.filters) - set(self.filters) 1663 1664 # Add filters present in self but not in other to other 1665 for other_filter in other_missing_filters: 1666 filter_copy = copy.deepcopy(other_filter) 1667 other._mean = np.repeat(other.mean, filter_copy.num_bins, axis=0) 1668 other._std_dev = np.repeat(other.std_dev, filter_copy.num_bins, axis=0) 1669 other.filters.append(filter_copy) 1670 1671 # Add filters present in other but not in self to self 1672 for self_filter in self_missing_filters: 1673 filter_copy = copy.deepcopy(self_filter) 1674 self._mean = np.repeat(self.mean, filter_copy.num_bins, axis=0) 1675 self._std_dev = np.repeat(self.std_dev, filter_copy.num_bins, axis=0) 1676 self.filters.append(filter_copy) 1677 1678 # Align other filters with self filters 1679 for i, self_filter in enumerate(self.filters): 1680 other_index = other.filters.index(self_filter) 1681 1682 # If necessary, swap other filter 1683 if other_index != i: 1684 other._swap_filters(self_filter, other.filters[i]) 1685 1686 # Repeat and tile the data by nuclide in preparation for performing 1687 # the tensor product across nuclides. 1688 if nuclide_product == 'tensor': 1689 self._mean = np.repeat(self.mean, other.num_nuclides, axis=1) 1690 self._std_dev = np.repeat(self.std_dev, other.num_nuclides, axis=1) 1691 other._mean = np.tile(other.mean, (1, self.num_nuclides, 1)) 1692 other._std_dev = np.tile(other.std_dev, (1, self.num_nuclides, 1)) 1693 1694 # Add nuclides to each tally such that each tally contains the complete 1695 # set of nuclides necessary to perform an entrywise product. New 1696 # nuclides added to a tally will have all their scores set to zero. 1697 else: 1698 1699 # Get the set of nuclides that each tally is missing 1700 other_missing_nuclides = set(self.nuclides) - set(other.nuclides) 1701 self_missing_nuclides = set(other.nuclides) - set(self.nuclides) 1702 1703 # Add nuclides present in self but not in other to other 1704 for nuclide in other_missing_nuclides: 1705 other._mean = np.insert(other.mean, other.num_nuclides, 0, axis=1) 1706 other._std_dev = np.insert(other.std_dev, other.num_nuclides, 0, 1707 axis=1) 1708 other.nuclides.append(nuclide) 1709 1710 # Add nuclides present in other but not in self to self 1711 for nuclide in self_missing_nuclides: 1712 self._mean = np.insert(self.mean, self.num_nuclides, 0, axis=1) 1713 self._std_dev = np.insert(self.std_dev, self.num_nuclides, 0, 1714 axis=1) 1715 self.nuclides.append(nuclide) 1716 1717 # Align other nuclides with self nuclides 1718 for i, nuclide in enumerate(self.nuclides): 1719 other_index = other.get_nuclide_index(nuclide) 1720 1721 # If necessary, swap other nuclide 1722 if other_index != i: 1723 other._swap_nuclides(nuclide, other.nuclides[i]) 1724 1725 # Repeat and tile the data by score in preparation for performing 1726 # the tensor product across scores. 1727 if score_product == 'tensor': 1728 self._mean = np.repeat(self.mean, other.num_scores, axis=2) 1729 self._std_dev = np.repeat(self.std_dev, other.num_scores, axis=2) 1730 other._mean = np.tile(other.mean, (1, 1, self.num_scores)) 1731 other._std_dev = np.tile(other.std_dev, (1, 1, self.num_scores)) 1732 1733 # Add scores to each tally such that each tally contains the complete set 1734 # of scores necessary to perform an entrywise product. New scores added 1735 # to a tally will be set to zero. 1736 else: 1737 1738 # Get the set of scores that each tally is missing 1739 other_missing_scores = set(self.scores) - set(other.scores) 1740 self_missing_scores = set(other.scores) - set(self.scores) 1741 1742 # Add scores present in self but not in other to other 1743 for score in other_missing_scores: 1744 other._mean = np.insert(other.mean, other.num_scores, 0, axis=2) 1745 other._std_dev = np.insert(other.std_dev, other.num_scores, 0, axis=2) 1746 other.scores.append(score) 1747 1748 # Add scores present in other but not in self to self 1749 for score in self_missing_scores: 1750 self._mean = np.insert(self.mean, self.num_scores, 0, axis=2) 1751 self._std_dev = np.insert(self.std_dev, self.num_scores, 0, axis=2) 1752 self.scores.append(score) 1753 1754 # Align other scores with self scores 1755 for i, score in enumerate(self.scores): 1756 other_index = other.scores.index(score) 1757 1758 # If necessary, swap other score 1759 if other_index != i: 1760 other._swap_scores(score, other.scores[i]) 1761 1762 data = {} 1763 data['self'] = {} 1764 data['other'] = {} 1765 data['self']['mean'] = self.mean 1766 data['other']['mean'] = other.mean 1767 data['self']['std. dev.'] = self.std_dev 1768 data['other']['std. dev.'] = other.std_dev 1769 return data 1770 1771 def _swap_filters(self, filter1, filter2): 1772 """Reverse the ordering of two filters in this tally 1773 1774 This is a helper method for tally arithmetic which helps align the data 1775 in two tallies with shared filters. This method reverses the order of 1776 the two filters in place. 1777 1778 Parameters 1779 ---------- 1780 filter1 : Filter 1781 The filter to swap with filter2 1782 filter2 : Filter 1783 The filter to swap with filter1 1784 1785 Raises 1786 ------ 1787 ValueError 1788 If this is a derived tally or this method is called before the tally 1789 is populated with data. 1790 1791 """ 1792 1793 cv.check_type('filter1', filter1, _FILTER_CLASSES) 1794 cv.check_type('filter2', filter2, _FILTER_CLASSES) 1795 1796 # Check that the filters exist in the tally and are not the same 1797 if filter1 == filter2: 1798 return 1799 elif filter1 not in self.filters: 1800 msg = 'Unable to swap "{}" filter1 in Tally ID="{}" since it ' \ 1801 'does not contain such a filter'.format(filter1.type, self.id) 1802 raise ValueError(msg) 1803 elif filter2 not in self.filters: 1804 msg = 'Unable to swap "{}" filter2 in Tally ID="{}" since it ' \ 1805 'does not contain such a filter'.format(filter2.type, self.id) 1806 raise ValueError(msg) 1807 1808 # Construct lists of tuples for the bins in each of the two filters 1809 filters = [type(filter1), type(filter2)] 1810 if isinstance(filter1, openmc.DistribcellFilter): 1811 filter1_bins = [b for b in range(filter1.num_bins)] 1812 elif isinstance(filter1, openmc.EnergyFunctionFilter): 1813 filter1_bins = [None] 1814 else: 1815 filter1_bins = filter1.bins 1816 1817 if isinstance(filter2, openmc.DistribcellFilter): 1818 filter2_bins = [b for b in range(filter2.num_bins)] 1819 elif isinstance(filter2, openmc.EnergyFunctionFilter): 1820 filter2_bins = [None] 1821 else: 1822 filter2_bins = filter2.bins 1823 1824 # Create variables to store views of data in the misaligned structure 1825 mean = {} 1826 std_dev = {} 1827 1828 # Store the data from the misaligned structure 1829 for i, (bin1, bin2) in enumerate(product(filter1_bins, filter2_bins)): 1830 filter_bins = [(bin1,), (bin2,)] 1831 1832 if self.mean is not None: 1833 mean[i] = self.get_values( 1834 filters=filters, filter_bins=filter_bins, value='mean') 1835 1836 if self.std_dev is not None: 1837 std_dev[i] = self.get_values( 1838 filters=filters, filter_bins=filter_bins, value='std_dev') 1839 1840 # Swap the filters in the copied version of this Tally 1841 filter1_index = self.filters.index(filter1) 1842 filter2_index = self.filters.index(filter2) 1843 self.filters[filter1_index] = filter2 1844 self.filters[filter2_index] = filter1 1845 1846 # Realign the data 1847 for i, (bin1, bin2) in enumerate(product(filter1_bins, filter2_bins)): 1848 filter_bins = [(bin1,), (bin2,)] 1849 indices = self.get_filter_indices(filters, filter_bins) 1850 1851 if self.mean is not None: 1852 self.mean[indices, :, :] = mean[i] 1853 1854 if self.std_dev is not None: 1855 self.std_dev[indices, :, :] = std_dev[i] 1856 1857 def _swap_nuclides(self, nuclide1, nuclide2): 1858 """Reverse the ordering of two nuclides in this tally 1859 1860 This is a helper method for tally arithmetic which helps align the data 1861 in two tallies with shared nuclides. This method reverses the order of 1862 the two nuclides in place. 1863 1864 Parameters 1865 ---------- 1866 nuclide1 : Nuclide 1867 The nuclide to swap with nuclide2 1868 1869 nuclide2 : Nuclide 1870 The nuclide to swap with nuclide1 1871 1872 Raises 1873 ------ 1874 ValueError 1875 If this is a derived tally or this method is called before the tally 1876 is populated with data. 1877 1878 """ 1879 1880 # Check that results have been read 1881 if not self.derived and self.sum is None: 1882 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 1883 'since it does not contain any results.'.format(self.id) 1884 raise ValueError(msg) 1885 1886 cv.check_type('nuclide1', nuclide1, _NUCLIDE_CLASSES) 1887 cv.check_type('nuclide2', nuclide2, _NUCLIDE_CLASSES) 1888 1889 # Check that the nuclides exist in the tally and are not the same 1890 if nuclide1 == nuclide2: 1891 msg = 'Unable to swap a nuclide with itself' 1892 raise ValueError(msg) 1893 elif nuclide1 not in self.nuclides: 1894 msg = 'Unable to swap nuclide1 "{}" in Tally ID="{}" since it ' \ 1895 'does not contain such a nuclide'\ 1896 .format(nuclide1.name, self.id) 1897 raise ValueError(msg) 1898 elif nuclide2 not in self.nuclides: 1899 msg = 'Unable to swap "{}" nuclide2 in Tally ID="{}" since it ' \ 1900 'does not contain such a nuclide'\ 1901 .format(nuclide2.name, self.id) 1902 raise ValueError(msg) 1903 1904 # Swap the nuclides in the Tally 1905 nuclide1_index = self.get_nuclide_index(nuclide1) 1906 nuclide2_index = self.get_nuclide_index(nuclide2) 1907 self.nuclides[nuclide1_index] = nuclide2 1908 self.nuclides[nuclide2_index] = nuclide1 1909 1910 # Adjust the mean data array to relect the new nuclide order 1911 if self.mean is not None: 1912 nuclide1_mean = self.mean[:, nuclide1_index, :].copy() 1913 nuclide2_mean = self.mean[:, nuclide2_index, :].copy() 1914 self.mean[:, nuclide2_index, :] = nuclide1_mean 1915 self.mean[:, nuclide1_index, :] = nuclide2_mean 1916 1917 # Adjust the std_dev data array to relect the new nuclide order 1918 if self.std_dev is not None: 1919 nuclide1_std_dev = self.std_dev[:, nuclide1_index, :].copy() 1920 nuclide2_std_dev = self.std_dev[:, nuclide2_index, :].copy() 1921 self.std_dev[:, nuclide2_index, :] = nuclide1_std_dev 1922 self.std_dev[:, nuclide1_index, :] = nuclide2_std_dev 1923 1924 def _swap_scores(self, score1, score2): 1925 """Reverse the ordering of two scores in this tally 1926 1927 This is a helper method for tally arithmetic which helps align the data 1928 in two tallies with shared scores. This method reverses the order 1929 of the two scores in place. 1930 1931 Parameters 1932 ---------- 1933 score1 : str or CrossScore 1934 The score to swap with score2 1935 1936 score2 : str or CrossScore 1937 The score to swap with score1 1938 1939 Raises 1940 ------ 1941 ValueError 1942 If this is a derived tally or this method is called before the tally 1943 is populated with data. 1944 1945 """ 1946 1947 # Check that results have been read 1948 if not self.derived and self.sum is None: 1949 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 1950 'since it does not contain any results.'.format(self.id) 1951 raise ValueError(msg) 1952 1953 # Check that the scores are valid 1954 if not isinstance(score1, (str, openmc.CrossScore)): 1955 msg = 'Unable to swap score1 "{}" in Tally ID="{}" since it is ' \ 1956 'not a string or CrossScore'.format(score1, self.id) 1957 raise ValueError(msg) 1958 elif not isinstance(score2, (str, openmc.CrossScore)): 1959 msg = 'Unable to swap score2 "{}" in Tally ID="{}" since it is ' \ 1960 'not a string or CrossScore'.format(score2, self.id) 1961 raise ValueError(msg) 1962 1963 # Check that the scores exist in the tally and are not the same 1964 if score1 == score2: 1965 msg = 'Unable to swap a score with itself' 1966 raise ValueError(msg) 1967 elif score1 not in self.scores: 1968 msg = 'Unable to swap score1 "{}" in Tally ID="{}" since it ' \ 1969 'does not contain such a score'.format(score1, self.id) 1970 raise ValueError(msg) 1971 elif score2 not in self.scores: 1972 msg = 'Unable to swap score2 "{}" in Tally ID="{}" since it ' \ 1973 'does not contain such a score'.format(score2, self.id) 1974 raise ValueError(msg) 1975 1976 # Swap the scores in the Tally 1977 score1_index = self.get_score_index(score1) 1978 score2_index = self.get_score_index(score2) 1979 self.scores[score1_index] = score2 1980 self.scores[score2_index] = score1 1981 1982 # Adjust the mean data array to relect the new nuclide order 1983 if self.mean is not None: 1984 score1_mean = self.mean[:, :, score1_index].copy() 1985 score2_mean = self.mean[:, :, score2_index].copy() 1986 self.mean[:, :, score2_index] = score1_mean 1987 self.mean[:, :, score1_index] = score2_mean 1988 1989 # Adjust the std_dev data array to relect the new nuclide order 1990 if self.std_dev is not None: 1991 score1_std_dev = self.std_dev[:, :, score1_index].copy() 1992 score2_std_dev = self.std_dev[:, :, score2_index].copy() 1993 self.std_dev[:, :, score2_index] = score1_std_dev 1994 self.std_dev[:, :, score1_index] = score2_std_dev 1995 1996 def __add__(self, other): 1997 """Adds this tally to another tally or scalar value. 1998 1999 This method builds a new tally with data that is the sum of this 2000 tally's data and that from the other tally or scalar value. If the 2001 filters, scores and nuclides in the two tallies are not the same, then 2002 they are combined in all possible ways in the new derived tally. 2003 2004 Uncertainty propagation is used to compute the standard deviation 2005 for the new tally's data. It is important to note that this makes 2006 the assumption that the tally data is independently distributed. 2007 In most use cases, this is *not* true and may lead to under-prediction 2008 of the uncertainty. The uncertainty propagation model is from the 2009 following source: 2010 2011 https://en.wikipedia.org/wiki/Propagation_of_uncertainty 2012 2013 Parameters 2014 ---------- 2015 other : openmc.Tally or float 2016 The tally or scalar value to add to this tally 2017 2018 Returns 2019 ------- 2020 openmc.Tally 2021 A new derived tally which is the sum of this tally and the other 2022 tally or scalar value in the addition. 2023 2024 Raises 2025 ------ 2026 ValueError 2027 When this method is called before the Tally is populated with data. 2028 2029 """ 2030 2031 # Check that results have been read 2032 if not self.derived and self.sum is None: 2033 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 2034 'since it does not contain any results.'.format(self.id) 2035 raise ValueError(msg) 2036 2037 if isinstance(other, Tally): 2038 new_tally = self.hybrid_product(other, binary_op='+') 2039 2040 # If both tally operands were sparse, sparsify the new tally 2041 if self.sparse and other.sparse: 2042 new_tally.sparse = True 2043 2044 elif isinstance(other, Real): 2045 new_tally = Tally(name='derived') 2046 new_tally._derived = True 2047 new_tally.with_batch_statistics = True 2048 new_tally.name = self.name 2049 new_tally._mean = self.mean + other 2050 new_tally._std_dev = self.std_dev 2051 new_tally.estimator = self.estimator 2052 new_tally.with_summary = self.with_summary 2053 new_tally.num_realizations = self.num_realizations 2054 2055 new_tally.filters = copy.deepcopy(self.filters) 2056 new_tally.nuclides = copy.deepcopy(self.nuclides) 2057 new_tally.scores = copy.deepcopy(self.scores) 2058 2059 # If this tally operand is sparse, sparsify the new tally 2060 new_tally.sparse = self.sparse 2061 2062 else: 2063 msg = 'Unable to add "{}" to Tally ID="{}"'.format(other, self.id) 2064 raise ValueError(msg) 2065 2066 return new_tally 2067 2068 def __sub__(self, other): 2069 """Subtracts another tally or scalar value from this tally. 2070 2071 This method builds a new tally with data that is the difference of 2072 this tally's data and that from the other tally or scalar value. If the 2073 filters, scores and nuclides in the two tallies are not the same, then 2074 they are combined in all possible ways in the new derived tally. 2075 2076 Uncertainty propagation is used to compute the standard deviation 2077 for the new tally's data. It is important to note that this makes 2078 the assumption that the tally data is independently distributed. 2079 In most use cases, this is *not* true and may lead to under-prediction 2080 of the uncertainty. The uncertainty propagation model is from the 2081 following source: 2082 2083 https://en.wikipedia.org/wiki/Propagation_of_uncertainty 2084 2085 Parameters 2086 ---------- 2087 other : openmc.Tally or float 2088 The tally or scalar value to subtract from this tally 2089 2090 Returns 2091 ------- 2092 openmc.Tally 2093 A new derived tally which is the difference of this tally and the 2094 other tally or scalar value in the subtraction. 2095 2096 Raises 2097 ------ 2098 ValueError 2099 When this method is called before the Tally is populated with data. 2100 2101 """ 2102 2103 # Check that results have been read 2104 if not self.derived and self.sum is None: 2105 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 2106 'since it does not contain any results.'.format(self.id) 2107 raise ValueError(msg) 2108 2109 if isinstance(other, Tally): 2110 new_tally = self.hybrid_product(other, binary_op='-') 2111 2112 # If both tally operands were sparse, sparsify the new tally 2113 if self.sparse and other.sparse: 2114 new_tally.sparse = True 2115 2116 elif isinstance(other, Real): 2117 new_tally = Tally(name='derived') 2118 new_tally._derived = True 2119 new_tally.name = self.name 2120 new_tally._mean = self.mean - other 2121 new_tally._std_dev = self.std_dev 2122 new_tally.estimator = self.estimator 2123 new_tally.with_summary = self.with_summary 2124 new_tally.num_realizations = self.num_realizations 2125 2126 new_tally.filters = copy.deepcopy(self.filters) 2127 new_tally.nuclides = copy.deepcopy(self.nuclides) 2128 new_tally.scores = copy.deepcopy(self.scores) 2129 2130 # If this tally operand is sparse, sparsify the new tally 2131 new_tally.sparse = self.sparse 2132 2133 else: 2134 msg = 'Unable to subtract "{}" from Tally ID="{}"'.format(other, self.id) 2135 raise ValueError(msg) 2136 2137 return new_tally 2138 2139 def __mul__(self, other): 2140 """Multiplies this tally with another tally or scalar value. 2141 2142 This method builds a new tally with data that is the product of 2143 this tally's data and that from the other tally or scalar value. If the 2144 filters, scores and nuclides in the two tallies are not the same, then 2145 they are combined in all possible ways in the new derived tally. 2146 2147 Uncertainty propagation is used to compute the standard deviation 2148 for the new tally's data. It is important to note that this makes 2149 the assumption that the tally data is independently distributed. 2150 In most use cases, this is *not* true and may lead to under-prediction 2151 of the uncertainty. The uncertainty propagation model is from the 2152 following source: 2153 2154 https://en.wikipedia.org/wiki/Propagation_of_uncertainty 2155 2156 Parameters 2157 ---------- 2158 other : openmc.Tally or float 2159 The tally or scalar value to multiply with this tally 2160 2161 Returns 2162 ------- 2163 openmc.Tally 2164 A new derived tally which is the product of this tally and the 2165 other tally or scalar value in the multiplication. 2166 2167 Raises 2168 ------ 2169 ValueError 2170 When this method is called before the Tally is populated with data. 2171 2172 """ 2173 2174 # Check that results have been read 2175 if not self.derived and self.sum is None: 2176 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 2177 'since it does not contain any results.'.format(self.id) 2178 raise ValueError(msg) 2179 2180 if isinstance(other, Tally): 2181 new_tally = self.hybrid_product(other, binary_op='*') 2182 2183 # If original tally operands were sparse, sparsify the new tally 2184 if self.sparse and other.sparse: 2185 new_tally.sparse = True 2186 2187 elif isinstance(other, Real): 2188 new_tally = Tally(name='derived') 2189 new_tally._derived = True 2190 new_tally.name = self.name 2191 new_tally._mean = self.mean * other 2192 new_tally._std_dev = self.std_dev * np.abs(other) 2193 new_tally.estimator = self.estimator 2194 new_tally.with_summary = self.with_summary 2195 new_tally.num_realizations = self.num_realizations 2196 2197 new_tally.filters = copy.deepcopy(self.filters) 2198 new_tally.nuclides = copy.deepcopy(self.nuclides) 2199 new_tally.scores = copy.deepcopy(self.scores) 2200 2201 # If this tally operand is sparse, sparsify the new tally 2202 new_tally.sparse = self.sparse 2203 2204 else: 2205 msg = 'Unable to multiply Tally ID="{}" by "{}"'.format(self.id, other) 2206 raise ValueError(msg) 2207 2208 return new_tally 2209 2210 def __truediv__(self, other): 2211 """Divides this tally by another tally or scalar value. 2212 2213 This method builds a new tally with data that is the dividend of 2214 this tally's data and that from the other tally or scalar value. If the 2215 filters, scores and nuclides in the two tallies are not the same, then 2216 they are combined in all possible ways in the new derived tally. 2217 2218 Uncertainty propagation is used to compute the standard deviation 2219 for the new tally's data. It is important to note that this makes 2220 the assumption that the tally data is independently distributed. 2221 In most use cases, this is *not* true and may lead to under-prediction 2222 of the uncertainty. The uncertainty propagation model is from the 2223 following source: 2224 2225 https://en.wikipedia.org/wiki/Propagation_of_uncertainty 2226 2227 Parameters 2228 ---------- 2229 other : openmc.Tally or float 2230 The tally or scalar value to divide this tally by 2231 2232 Returns 2233 ------- 2234 openmc.Tally 2235 A new derived tally which is the dividend of this tally and the 2236 other tally or scalar value in the division. 2237 2238 Raises 2239 ------ 2240 ValueError 2241 When this method is called before the Tally is populated with data. 2242 2243 """ 2244 2245 # Check that results have been read 2246 if not self.derived and self.sum is None: 2247 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 2248 'since it does not contain any results.'.format(self.id) 2249 raise ValueError(msg) 2250 2251 if isinstance(other, Tally): 2252 new_tally = self.hybrid_product(other, binary_op='/') 2253 2254 # If original tally operands were sparse, sparsify the new tally 2255 if self.sparse and other.sparse: 2256 new_tally.sparse = True 2257 2258 elif isinstance(other, Real): 2259 new_tally = Tally(name='derived') 2260 new_tally._derived = True 2261 new_tally.name = self.name 2262 new_tally._mean = self.mean / other 2263 new_tally._std_dev = self.std_dev * np.abs(1. / other) 2264 new_tally.estimator = self.estimator 2265 new_tally.with_summary = self.with_summary 2266 new_tally.num_realizations = self.num_realizations 2267 2268 new_tally.filters = copy.deepcopy(self.filters) 2269 new_tally.nuclides = copy.deepcopy(self.nuclides) 2270 new_tally.scores = copy.deepcopy(self.scores) 2271 2272 # If this tally operand is sparse, sparsify the new tally 2273 new_tally.sparse = self.sparse 2274 2275 else: 2276 msg = 'Unable to divide Tally ID="{}" by "{}"'.format(self.id, other) 2277 raise ValueError(msg) 2278 2279 return new_tally 2280 2281 def __div__(self, other): 2282 return self.__truediv__(other) 2283 2284 def __pow__(self, power): 2285 """Raises this tally to another tally or scalar value power. 2286 2287 This method builds a new tally with data that is the power of 2288 this tally's data to that from the other tally or scalar value. If the 2289 filters, scores and nuclides in the two tallies are not the same, then 2290 they are combined in all possible ways in the new derived tally. 2291 2292 Uncertainty propagation is used to compute the standard deviation 2293 for the new tally's data. It is important to note that this makes 2294 the assumption that the tally data is independently distributed. 2295 In most use cases, this is *not* true and may lead to under-prediction 2296 of the uncertainty. The uncertainty propagation model is from the 2297 following source: 2298 2299 https://en.wikipedia.org/wiki/Propagation_of_uncertainty 2300 2301 Parameters 2302 ---------- 2303 power : openmc.Tally or float 2304 The tally or scalar value exponent 2305 2306 Returns 2307 ------- 2308 openmc.Tally 2309 A new derived tally which is this tally raised to the power of the 2310 other tally or scalar value in the exponentiation. 2311 2312 Raises 2313 ------ 2314 ValueError 2315 When this method is called before the Tally is populated with data. 2316 2317 """ 2318 2319 # Check that results have been read 2320 if not self.derived and self.sum is None: 2321 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 2322 'since it does not contain any results.'.format(self.id) 2323 raise ValueError(msg) 2324 2325 if isinstance(power, Tally): 2326 new_tally = self.hybrid_product(power, binary_op='^') 2327 2328 # If original tally operand was sparse, sparsify the new tally 2329 if self.sparse: 2330 new_tally.sparse = True 2331 2332 elif isinstance(power, Real): 2333 new_tally = Tally(name='derived') 2334 new_tally._derived = True 2335 new_tally.name = self.name 2336 new_tally._mean = self._mean ** power 2337 self_rel_err = self.std_dev / self.mean 2338 new_tally._std_dev = np.abs(new_tally._mean * power * self_rel_err) 2339 new_tally.estimator = self.estimator 2340 new_tally.with_summary = self.with_summary 2341 new_tally.num_realizations = self.num_realizations 2342 2343 new_tally.filters = copy.deepcopy(self.filters) 2344 new_tally.nuclides = copy.deepcopy(self.nuclides) 2345 new_tally.scores = copy.deepcopy(self.scores) 2346 2347 # If original tally was sparse, sparsify the exponentiated tally 2348 new_tally.sparse = self.sparse 2349 2350 else: 2351 msg = 'Unable to raise Tally ID="{}" to power "{}"'.format(self.id, power) 2352 raise ValueError(msg) 2353 2354 return new_tally 2355 2356 def __radd__(self, other): 2357 """Right addition with a scalar value. 2358 2359 This reverses the operands and calls the __add__ method. 2360 2361 Parameters 2362 ---------- 2363 other : float 2364 The scalar value to add to this tally 2365 2366 Returns 2367 ------- 2368 openmc.Tally 2369 A new derived tally of this tally added with the scalar value. 2370 2371 """ 2372 2373 return self + other 2374 2375 def __rsub__(self, other): 2376 """Right subtraction from a scalar value. 2377 2378 This reverses the operands and calls the __sub__ method. 2379 2380 Parameters 2381 ---------- 2382 other : float 2383 The scalar value to subtract this tally from 2384 2385 Returns 2386 ------- 2387 openmc.Tally 2388 A new derived tally of this tally subtracted from the scalar value. 2389 2390 """ 2391 2392 return -1. * self + other 2393 2394 def __rmul__(self, other): 2395 """Right multiplication with a scalar value. 2396 2397 This reverses the operands and calls the __mul__ method. 2398 2399 Parameters 2400 ---------- 2401 other : float 2402 The scalar value to multiply with this tally 2403 2404 Returns 2405 ------- 2406 openmc.Tally 2407 A new derived tally of this tally multiplied by the scalar value. 2408 2409 """ 2410 2411 return self * other 2412 2413 def __rdiv__(self, other): 2414 """Right division with a scalar value. 2415 2416 This reverses the operands and calls the __div__ method. 2417 2418 Parameters 2419 ---------- 2420 other : float 2421 The scalar value to divide by this tally 2422 2423 Returns 2424 ------- 2425 openmc.Tally 2426 A new derived tally of the scalar value divided by this tally. 2427 2428 """ 2429 2430 return other * self**-1 2431 2432 def __abs__(self): 2433 """The absolute value of this tally. 2434 2435 Returns 2436 ------- 2437 openmc.Tally 2438 A new derived tally which is the absolute value of this tally. 2439 2440 """ 2441 2442 new_tally = copy.deepcopy(self) 2443 new_tally._mean = np.abs(new_tally.mean) 2444 return new_tally 2445 2446 def __neg__(self): 2447 """The negated value of this tally. 2448 2449 Returns 2450 ------- 2451 openmc.Tally 2452 A new derived tally which is the negated value of this tally. 2453 2454 """ 2455 2456 new_tally = self * -1 2457 return new_tally 2458 2459 def get_slice(self, scores=[], filters=[], filter_bins=[], nuclides=[], 2460 squeeze=False): 2461 """Build a sliced tally for the specified filters, scores and nuclides. 2462 2463 This method constructs a new tally to encapsulate a subset of the data 2464 represented by this tally. The subset of data to include in the tally 2465 slice is determined by the scores, filters and nuclides specified in 2466 the input parameters. 2467 2468 Parameters 2469 ---------- 2470 scores : list of str 2471 A list of one or more score strings (e.g., ['absorption', 2472 'nu-fission'] 2473 filters : Iterable of openmc.FilterMeta 2474 An iterable of filter types (e.g., [MeshFilter, EnergyFilter]) 2475 filter_bins : list of Iterables 2476 A list of iterables of filter bins corresponding to the specified 2477 filter types (e.g., [(1,), ((0., 0.625e-6),)]). Each iterable 2478 contains bins to slice for the corresponding filter type in the 2479 filters parameter. Each bin is the integer ID for 'material', 2480 'surface', 'cell', 'cellborn', and 'universe' Filters. Each bin is 2481 an integer for the cell instance ID for 'distribcell' Filters. Each 2482 bin is a 2-tuple of floats for 'energy' and 'energyout' filters 2483 corresponding to the energy boundaries of the bin of interest. The 2484 bin is an (x,y,z) 3-tuple for 'mesh' filters corresponding to the 2485 mesh cell of interest. The order of the bins in the list must 2486 correspond to the `filters` argument. 2487 nuclides : list of str 2488 A list of nuclide name strings (e.g., ['U235', 'U238']) 2489 squeeze : bool 2490 Whether to remove filters with only a single bin in the sliced tally 2491 2492 Returns 2493 ------- 2494 openmc.Tally 2495 A new tally which encapsulates the subset of data requested in the 2496 order each filter, nuclide and score is listed in the parameters. 2497 2498 Raises 2499 ------ 2500 ValueError 2501 When this method is called before the Tally is populated with data. 2502 2503 """ 2504 2505 # Ensure that the tally has data 2506 if not self.derived and self.sum is None: 2507 msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \ 2508 'since it does not contain any results.'.format(self.id) 2509 raise ValueError(msg) 2510 2511 # Create deep copy of tally to return as sliced tally 2512 new_tally = copy.deepcopy(self) 2513 new_tally._derived = True 2514 2515 # Differentiate Tally with a new auto-generated Tally ID 2516 new_tally.id = None 2517 2518 new_tally.sparse = False 2519 2520 if not self.derived and self.sum is not None: 2521 new_sum = self.get_values(scores, filters, filter_bins, 2522 nuclides, 'sum') 2523 new_tally.sum = new_sum 2524 if not self.derived and self.sum_sq is not None: 2525 new_sum_sq = self.get_values(scores, filters, filter_bins, 2526 nuclides, 'sum_sq') 2527 new_tally.sum_sq = new_sum_sq 2528 if self.mean is not None: 2529 new_mean = self.get_values(scores, filters, filter_bins, 2530 nuclides, 'mean') 2531 new_tally._mean = new_mean 2532 if self.std_dev is not None: 2533 new_std_dev = self.get_values(scores, filters, filter_bins, 2534 nuclides, 'std_dev') 2535 new_tally._std_dev = new_std_dev 2536 2537 # SCORES 2538 if scores: 2539 score_indices = [] 2540 2541 # Determine the score indices from any of the requested scores 2542 for score in self.scores: 2543 if score not in scores: 2544 score_index = self.get_score_index(score) 2545 score_indices.append(score_index) 2546 2547 # Loop over indices in reverse to remove excluded scores 2548 for score_index in reversed(score_indices): 2549 new_tally.remove_score(self.scores[score_index]) 2550 2551 # NUCLIDES 2552 if nuclides: 2553 nuclide_indices = [] 2554 2555 # Determine the nuclide indices from any of the requested nuclides 2556 for nuclide in self.nuclides: 2557 if nuclide.name not in nuclides: 2558 nuclide_index = self.get_nuclide_index(nuclide.name) 2559 nuclide_indices.append(nuclide_index) 2560 2561 # Loop over indices in reverse to remove excluded Nuclides 2562 for nuclide_index in reversed(nuclide_indices): 2563 new_tally.remove_nuclide(self.nuclides[nuclide_index]) 2564 2565 # FILTERS 2566 if filters: 2567 2568 # Determine the filter indices from any of the requested filters 2569 for i, filter_type in enumerate(filters): 2570 f = new_tally.find_filter(filter_type) 2571 2572 # Remove filters with only a single bin if requested 2573 if squeeze: 2574 if len(filter_bins[i]) == 1: 2575 new_tally.filters.remove(f) 2576 continue 2577 else: 2578 raise RuntimeError('Cannot remove sliced filter with ' 2579 'more than one bin.') 2580 2581 # Remove and/or reorder filter bins to user specifications 2582 bin_indices = [f.get_bin_index(b) 2583 for b in filter_bins[i]] 2584 bin_indices = np.unique(bin_indices) 2585 2586 # Set bins for sliced filter 2587 new_filter = copy.copy(f) 2588 new_filter.bins = [f.bins[i] for i in bin_indices] 2589 2590 # Set number of bins manually for mesh/distribcell filters 2591 if filter_type is openmc.DistribcellFilter: 2592 new_filter._num_bins = f._num_bins 2593 2594 # Replace existing filter with new one 2595 for j, test_filter in enumerate(new_tally.filters): 2596 if isinstance(test_filter, filter_type): 2597 new_tally.filters[j] = new_filter 2598 2599 # If original tally was sparse, sparsify the sliced tally 2600 new_tally.sparse = self.sparse 2601 return new_tally 2602 2603 def summation(self, scores=[], filter_type=None, 2604 filter_bins=[], nuclides=[], remove_filter=False): 2605 """Vectorized sum of tally data across scores, filter bins and/or 2606 nuclides using tally aggregation. 2607 2608 This method constructs a new tally to encapsulate the sum of the data 2609 represented by the summation of the data in this tally. The tally data 2610 sum is determined by the scores, filter bins and nuclides specified 2611 in the input parameters. 2612 2613 Parameters 2614 ---------- 2615 scores : list of str 2616 A list of one or more score strings to sum across 2617 (e.g., ['absorption', 'nu-fission']; default is []) 2618 filter_type : openmc.FilterMeta 2619 Type of the filter, e.g. MeshFilter 2620 filter_bins : Iterable of int or tuple 2621 A list of the filter bins corresponding to the filter_type parameter 2622 Each bin in the list is the integer ID for 'material', 'surface', 2623 'cell', 'cellborn', and 'universe' Filters. Each bin is an integer 2624 for the cell instance ID for 'distribcell' Filters. Each bin is a 2625 2-tuple of floats for 'energy' and 'energyout' filters corresponding 2626 to the energy boundaries of the bin of interest. Each bin is an 2627 (x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of 2628 interest. 2629 nuclides : list of str 2630 A list of nuclide name strings to sum across 2631 (e.g., ['U235', 'U238']; default is []) 2632 remove_filter : bool 2633 If a filter is being summed over, this bool indicates whether to 2634 remove that filter in the returned tally. Default is False. 2635 2636 Returns 2637 ------- 2638 openmc.Tally 2639 A new tally which encapsulates the sum of data requested. 2640 """ 2641 2642 # Create new derived Tally for summation 2643 tally_sum = Tally() 2644 tally_sum._derived = True 2645 tally_sum._estimator = self.estimator 2646 tally_sum._num_realizations = self.num_realizations 2647 tally_sum._with_batch_statistics = self.with_batch_statistics 2648 tally_sum._with_summary = self.with_summary 2649 tally_sum._sp_filename = self._sp_filename 2650 tally_sum._results_read = self._results_read 2651 2652 # Get tally data arrays reshaped with one dimension per filter 2653 mean = self.get_reshaped_data(value='mean') 2654 std_dev = self.get_reshaped_data(value='std_dev') 2655 2656 # Sum across any filter bins specified by the user 2657 if isinstance(filter_type, openmc.FilterMeta): 2658 find_filter = self.find_filter(filter_type) 2659 2660 # If user did not specify filter bins, sum across all bins 2661 if len(filter_bins) == 0: 2662 bin_indices = np.arange(find_filter.num_bins) 2663 2664 if isinstance(find_filter, openmc.DistribcellFilter): 2665 filter_bins = np.arange(find_filter.num_bins) 2666 elif isinstance(find_filter, openmc.EnergyFunctionFilter): 2667 filter_bins = [None] 2668 else: 2669 filter_bins = find_filter.bins 2670 2671 # Only sum across bins specified by the user 2672 else: 2673 bin_indices = \ 2674 [find_filter.get_bin_index(bin) for bin in filter_bins] 2675 2676 # Sum across the bins in the user-specified filter 2677 for i, self_filter in enumerate(self.filters): 2678 if type(self_filter) == filter_type: 2679 shape = mean.shape 2680 mean = np.take(mean, indices=bin_indices, axis=i) 2681 std_dev = np.take(std_dev, indices=bin_indices, axis=i) 2682 2683 # NumPy take introduces a new dimension in output array 2684 # for some special cases that must be removed 2685 if len(mean.shape) > len(shape): 2686 mean = np.squeeze(mean, axis=i) 2687 std_dev = np.squeeze(std_dev, axis=i) 2688 2689 mean = np.sum(mean, axis=i, keepdims=True) 2690 std_dev = np.sum(std_dev**2, axis=i, keepdims=True) 2691 std_dev = np.sqrt(std_dev) 2692 2693 # Add AggregateFilter to the tally sum 2694 if not remove_filter: 2695 filter_sum = openmc.AggregateFilter(self_filter, 2696 [tuple(filter_bins)], 'sum') 2697 tally_sum.filters.append(filter_sum) 2698 2699 # Add a copy of each filter not summed across to the tally sum 2700 else: 2701 tally_sum.filters.append(copy.deepcopy(self_filter)) 2702 2703 # Add a copy of this tally's filters to the tally sum 2704 else: 2705 tally_sum._filters = copy.deepcopy(self.filters) 2706 2707 # Sum across any nuclides specified by the user 2708 if len(nuclides) != 0: 2709 nuclide_bins = [self.get_nuclide_index(nuclide) for nuclide in nuclides] 2710 axis_index = self.num_filters 2711 mean = np.take(mean, indices=nuclide_bins, axis=axis_index) 2712 std_dev = np.take(std_dev, indices=nuclide_bins, axis=axis_index) 2713 mean = np.sum(mean, axis=axis_index, keepdims=True) 2714 std_dev = np.sum(std_dev**2, axis=axis_index, keepdims=True) 2715 std_dev = np.sqrt(std_dev) 2716 2717 # Add AggregateNuclide to the tally sum 2718 nuclide_sum = openmc.AggregateNuclide(nuclides, 'sum') 2719 tally_sum.nuclides.append(nuclide_sum) 2720 2721 # Add a copy of this tally's nuclides to the tally sum 2722 else: 2723 tally_sum._nuclides = copy.deepcopy(self.nuclides) 2724 2725 # Sum across any scores specified by the user 2726 if len(scores) != 0: 2727 score_bins = [self.get_score_index(score) for score in scores] 2728 axis_index = self.num_filters + 1 2729 mean = np.take(mean, indices=score_bins, axis=axis_index) 2730 std_dev = np.take(std_dev, indices=score_bins, axis=axis_index) 2731 mean = np.sum(mean, axis=axis_index, keepdims=True) 2732 std_dev = np.sum(std_dev**2, axis=axis_index, keepdims=True) 2733 std_dev = np.sqrt(std_dev) 2734 2735 # Add AggregateScore to the tally sum 2736 score_sum = openmc.AggregateScore(scores, 'sum') 2737 tally_sum.scores.append(score_sum) 2738 2739 # Add a copy of this tally's scores to the tally sum 2740 else: 2741 tally_sum._scores = copy.deepcopy(self.scores) 2742 2743 # Reshape condensed data arrays with one dimension for all filters 2744 mean = np.reshape(mean, tally_sum.shape) 2745 std_dev = np.reshape(std_dev, tally_sum.shape) 2746 2747 # Assign tally sum's data with the new arrays 2748 tally_sum._mean = mean 2749 tally_sum._std_dev = std_dev 2750 2751 # If original tally was sparse, sparsify the tally summation 2752 tally_sum.sparse = self.sparse 2753 return tally_sum 2754 2755 def average(self, scores=[], filter_type=None, 2756 filter_bins=[], nuclides=[], remove_filter=False): 2757 """Vectorized average of tally data across scores, filter bins and/or 2758 nuclides using tally aggregation. 2759 2760 This method constructs a new tally to encapsulate the average of the 2761 data represented by the average of the data in this tally. The tally 2762 data average is determined by the scores, filter bins and nuclides 2763 specified in the input parameters. 2764 2765 Parameters 2766 ---------- 2767 scores : list of str 2768 A list of one or more score strings to average across 2769 (e.g., ['absorption', 'nu-fission']; default is []) 2770 filter_type : openmc.FilterMeta 2771 Type of the filter, e.g. MeshFilter 2772 filter_bins : Iterable of int or tuple 2773 A list of the filter bins corresponding to the filter_type parameter 2774 Each bin in the list is the integer ID for 'material', 'surface', 2775 'cell', 'cellborn', and 'universe' Filters. Each bin is an integer 2776 for the cell instance ID for 'distribcell' Filters. Each bin is a 2777 2-tuple of floats for 'energy' and 'energyout' filters corresponding 2778 to the energy boundaries of the bin of interest. Each bin is an 2779 (x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of 2780 interest. 2781 nuclides : list of str 2782 A list of nuclide name strings to average across 2783 (e.g., ['U235', 'U238']; default is []) 2784 remove_filter : bool 2785 If a filter is being averaged over, this bool indicates whether to 2786 remove that filter in the returned tally. Default is False. 2787 2788 Returns 2789 ------- 2790 openmc.Tally 2791 A new tally which encapsulates the average of data requested. 2792 """ 2793 2794 # Create new derived Tally for average 2795 tally_avg = Tally() 2796 tally_avg._derived = True 2797 tally_avg._estimator = self.estimator 2798 tally_avg._num_realizations = self.num_realizations 2799 tally_avg._with_batch_statistics = self.with_batch_statistics 2800 tally_avg._with_summary = self.with_summary 2801 tally_avg._sp_filename = self._sp_filename 2802 tally_avg._results_read = self._results_read 2803 2804 # Get tally data arrays reshaped with one dimension per filter 2805 mean = self.get_reshaped_data(value='mean') 2806 std_dev = self.get_reshaped_data(value='std_dev') 2807 2808 # Average across any filter bins specified by the user 2809 if isinstance(filter_type, openmc.FilterMeta): 2810 find_filter = self.find_filter(filter_type) 2811 2812 # If user did not specify filter bins, average across all bins 2813 if len(filter_bins) == 0: 2814 bin_indices = np.arange(find_filter.num_bins) 2815 2816 if isinstance(find_filter, openmc.DistribcellFilter): 2817 filter_bins = np.arange(find_filter.num_bins) 2818 elif isinstance(find_filter, openmc.EnergyFunctionFilter): 2819 filter_bins = [None] 2820 else: 2821 filter_bins = find_filter.bins 2822 2823 # Only average across bins specified by the user 2824 else: 2825 bin_indices = \ 2826 [find_filter.get_bin_index(bin) for bin in filter_bins] 2827 2828 # Average across the bins in the user-specified filter 2829 for i, self_filter in enumerate(self.filters): 2830 if isinstance(self_filter, filter_type): 2831 shape = mean.shape 2832 mean = np.take(mean, indices=bin_indices, axis=i) 2833 std_dev = np.take(std_dev, indices=bin_indices, axis=i) 2834 2835 # NumPy take introduces a new dimension in output array 2836 # for some special cases that must be removed 2837 if len(mean.shape) > len(shape): 2838 mean = np.squeeze(mean, axis=i) 2839 std_dev = np.squeeze(std_dev, axis=i) 2840 2841 mean = np.nanmean(mean, axis=i, keepdims=True) 2842 std_dev = np.nanmean(std_dev**2, axis=i, keepdims=True) 2843 std_dev /= len(bin_indices) 2844 std_dev = np.sqrt(std_dev) 2845 2846 # Add AggregateFilter to the tally avg 2847 if not remove_filter: 2848 filter_sum = openmc.AggregateFilter(self_filter, 2849 [tuple(filter_bins)], 'avg') 2850 tally_avg.filters.append(filter_sum) 2851 2852 # Add a copy of each filter not averaged across to the tally avg 2853 else: 2854 tally_avg.filters.append(copy.deepcopy(self_filter)) 2855 2856 # Add a copy of this tally's filters to the tally avg 2857 else: 2858 tally_avg._filters = copy.deepcopy(self.filters) 2859 2860 # Sum across any nuclides specified by the user 2861 if len(nuclides) != 0: 2862 nuclide_bins = [self.get_nuclide_index(nuclide) for nuclide in nuclides] 2863 axis_index = self.num_filters 2864 mean = np.take(mean, indices=nuclide_bins, axis=axis_index) 2865 std_dev = np.take(std_dev, indices=nuclide_bins, axis=axis_index) 2866 mean = np.nanmean(mean, axis=axis_index, keepdims=True) 2867 std_dev = np.nanmean(std_dev**2, axis=axis_index, keepdims=True) 2868 std_dev /= len(nuclide_bins) 2869 std_dev = np.sqrt(std_dev) 2870 2871 # Add AggregateNuclide to the tally avg 2872 nuclide_avg = openmc.AggregateNuclide(nuclides, 'avg') 2873 tally_avg.nuclides.append(nuclide_avg) 2874 2875 # Add a copy of this tally's nuclides to the tally avg 2876 else: 2877 tally_avg._nuclides = copy.deepcopy(self.nuclides) 2878 2879 # Sum across any scores specified by the user 2880 if len(scores) != 0: 2881 score_bins = [self.get_score_index(score) for score in scores] 2882 axis_index = self.num_filters + 1 2883 mean = np.take(mean, indices=score_bins, axis=axis_index) 2884 std_dev = np.take(std_dev, indices=score_bins, axis=axis_index) 2885 mean = np.nanmean(mean, axis=axis_index, keepdims=True) 2886 std_dev = np.nanmean(std_dev**2, axis=axis_index, keepdims=True) 2887 std_dev /= len(score_bins) 2888 std_dev = np.sqrt(std_dev) 2889 2890 # Add AggregateScore to the tally avg 2891 score_sum = openmc.AggregateScore(scores, 'avg') 2892 tally_avg.scores.append(score_sum) 2893 2894 # Add a copy of this tally's scores to the tally avg 2895 else: 2896 tally_avg._scores = copy.deepcopy(self.scores) 2897 2898 # Reshape condensed data arrays with one dimension for all filters 2899 mean = np.reshape(mean, tally_avg.shape) 2900 std_dev = np.reshape(std_dev, tally_avg.shape) 2901 2902 # Assign tally avg's data with the new arrays 2903 tally_avg._mean = mean 2904 tally_avg._std_dev = std_dev 2905 2906 # If original tally was sparse, sparsify the tally average 2907 tally_avg.sparse = self.sparse 2908 return tally_avg 2909 2910 def diagonalize_filter(self, new_filter, filter_position=-1): 2911 """Diagonalize the tally data array along a new axis of filter bins. 2912 2913 This is a helper method for the tally arithmetic methods. This method 2914 adds the new filter to a derived tally constructed copied from this one. 2915 The data in the derived tally arrays is "diagonalized" along the bins in 2916 the new filter. This functionality is used by the openmc.mgxs module; to 2917 transport-correct scattering matrices by subtracting a 'scatter-P1' 2918 reaction rate tally with an energy filter from a 'scatter' reaction 2919 rate tally with both energy and energyout filters. 2920 2921 Parameters 2922 ---------- 2923 new_filter : Filter 2924 The filter along which to diagonalize the data in the new 2925 filter_position : int 2926 Where to place the new filter in the Tally.filters list. Defaults 2927 to last position. 2928 2929 Returns 2930 ------- 2931 openmc.Tally 2932 A new derived Tally with data diagaonalized along the new filter. 2933 2934 """ 2935 2936 cv.check_type('new_filter', new_filter, _FILTER_CLASSES) 2937 cv.check_type('filter_position', filter_position, Integral) 2938 2939 if new_filter in self.filters: 2940 msg = 'Unable to diagonalize Tally ID="{}" which already ' \ 2941 'contains a "{}" filter'.format(self.id, type(new_filter)) 2942 raise ValueError(msg) 2943 2944 # Add the new filter to a copy of this Tally 2945 new_tally = copy.deepcopy(self) 2946 new_tally.filters.insert(filter_position, new_filter) 2947 2948 # Determine "base" indices along the new "diagonal", and the factor 2949 # by which the "base" indices should be repeated to account for all 2950 # other filter bins in the diagonalized tally 2951 indices = np.arange(0, new_filter.num_bins**2, new_filter.num_bins+1) 2952 diag_factor = self.num_filter_bins // new_filter.num_bins 2953 diag_indices = np.zeros(self.num_filter_bins, dtype=int) 2954 2955 # Determine the filter indices along the new "diagonal" 2956 for i in range(diag_factor): 2957 start = i * new_filter.num_bins 2958 end = (i+1) * new_filter.num_bins 2959 diag_indices[start:end] = indices + (i * new_filter.num_bins**2) 2960 2961 # Inject this Tally's data along the diagonal of the diagonalized Tally 2962 if not self.derived and self.sum is not None: 2963 new_tally._sum = np.zeros(new_tally.shape, dtype=np.float64) 2964 new_tally._sum[diag_indices, :, :] = self.sum 2965 if not self.derived and self.sum_sq is not None: 2966 new_tally._sum_sq = np.zeros(new_tally.shape, dtype=np.float64) 2967 new_tally._sum_sq[diag_indices, :, :] = self.sum_sq 2968 if self.mean is not None: 2969 new_tally._mean = np.zeros(new_tally.shape, dtype=np.float64) 2970 new_tally._mean[diag_indices, :, :] = self.mean 2971 if self.std_dev is not None: 2972 new_tally._std_dev = np.zeros(new_tally.shape, dtype=np.float64) 2973 new_tally._std_dev[diag_indices, :, :] = self.std_dev 2974 2975 # If original tally was sparse, sparsify the diagonalized tally 2976 new_tally.sparse = self.sparse 2977 return new_tally 2978 2979 2980class Tallies(cv.CheckedList): 2981 """Collection of Tallies used for an OpenMC simulation. 2982 2983 This class corresponds directly to the tallies.xml input file. It can be 2984 thought of as a normal Python list where each member is a :class:`Tally`. It 2985 behaves like a list as the following example demonstrates: 2986 2987 >>> t1 = openmc.Tally() 2988 >>> t2 = openmc.Tally() 2989 >>> t3 = openmc.Tally() 2990 >>> tallies = openmc.Tallies([t1]) 2991 >>> tallies.append(t2) 2992 >>> tallies += [t3] 2993 2994 Parameters 2995 ---------- 2996 tallies : Iterable of openmc.Tally 2997 Tallies to add to the collection 2998 2999 """ 3000 3001 def __init__(self, tallies=None): 3002 super().__init__(Tally, 'tallies collection') 3003 if tallies is not None: 3004 self += tallies 3005 3006 def append(self, tally, merge=False): 3007 """Append tally to collection 3008 3009 Parameters 3010 ---------- 3011 tally : openmc.Tally 3012 Tally to append 3013 merge : bool 3014 Indicate whether the tally should be merged with an existing tally, 3015 if possible. Defaults to False. 3016 3017 """ 3018 if not isinstance(tally, Tally): 3019 msg = 'Unable to add a non-Tally "{}" to the ' \ 3020 'Tallies instance'.format(tally) 3021 raise TypeError(msg) 3022 3023 if merge: 3024 merged = False 3025 3026 # Look for a tally to merge with this one 3027 for i, tally2 in enumerate(self): 3028 3029 # If a mergeable tally is found 3030 if tally2.can_merge(tally): 3031 # Replace tally2 with the merged tally 3032 merged_tally = tally2.merge(tally) 3033 self[i] = merged_tally 3034 merged = True 3035 break 3036 3037 # If no mergeable tally was found, simply add this tally 3038 if not merged: 3039 super().append(tally) 3040 3041 else: 3042 super().append(tally) 3043 3044 def insert(self, index, item): 3045 """Insert tally before index 3046 3047 Parameters 3048 ---------- 3049 index : int 3050 Index in list 3051 item : openmc.Tally 3052 Tally to insert 3053 3054 """ 3055 super().insert(index, item) 3056 3057 def merge_tallies(self): 3058 """Merge any mergeable tallies together. Note that n-way merges are 3059 possible. 3060 3061 """ 3062 3063 for i, tally1 in enumerate(self): 3064 for j, tally2 in enumerate(self): 3065 # Do not merge the same tally with itself 3066 if i == j: 3067 continue 3068 3069 # If the two tallies are mergeable 3070 if tally1.can_merge(tally2): 3071 # Replace tally 1 with the merged tally 3072 merged_tally = tally1.merge(tally2) 3073 self[i] = merged_tally 3074 3075 # Remove tally 2 since it is no longer needed 3076 self.pop(j) 3077 3078 # Continue iterating from the first loop 3079 break 3080 3081 def _create_tally_subelements(self, root_element): 3082 for tally in self: 3083 root_element.append(tally.to_xml_element()) 3084 3085 def _create_mesh_subelements(self, root_element): 3086 already_written = set() 3087 for tally in self: 3088 for f in tally.filters: 3089 if isinstance(f, openmc.MeshFilter): 3090 if f.mesh.id not in already_written: 3091 if len(f.mesh.name) > 0: 3092 root_element.append(ET.Comment(f.mesh.name)) 3093 3094 root_element.append(f.mesh.to_xml_element()) 3095 already_written.add(f.mesh.id) 3096 3097 def _create_filter_subelements(self, root_element): 3098 already_written = dict() 3099 for tally in self: 3100 for f in tally.filters: 3101 if f not in already_written: 3102 root_element.append(f.to_xml_element()) 3103 already_written[f] = f.id 3104 elif f.id != already_written[f]: 3105 # Set the IDs of identical filters with different 3106 # user-defined IDs to the same value 3107 f.id = already_written[f] 3108 3109 def _create_derivative_subelements(self, root_element): 3110 # Get a list of all derivatives referenced in a tally. 3111 derivs = [] 3112 for tally in self: 3113 deriv = tally.derivative 3114 if deriv is not None and deriv not in derivs: 3115 derivs.append(deriv) 3116 3117 # Add the derivatives to the XML tree. 3118 for d in derivs: 3119 root_element.append(d.to_xml_element()) 3120 3121 def export_to_xml(self, path='tallies.xml'): 3122 """Create a tallies.xml file that can be used for a simulation. 3123 3124 Parameters 3125 ---------- 3126 path : str 3127 Path to file to write. Defaults to 'tallies.xml'. 3128 3129 """ 3130 3131 root_element = ET.Element("tallies") 3132 self._create_mesh_subelements(root_element) 3133 self._create_filter_subelements(root_element) 3134 self._create_tally_subelements(root_element) 3135 self._create_derivative_subelements(root_element) 3136 3137 # Clean the indentation in the file to be user-readable 3138 clean_indentation(root_element) 3139 3140 # Check if path is a directory 3141 p = Path(path) 3142 if p.is_dir(): 3143 p /= 'tallies.xml' 3144 3145 # Write the XML Tree to the tallies.xml file 3146 reorder_attributes(root_element) # TODO: Remove when support is Python 3.8+ 3147 tree = ET.ElementTree(root_element) 3148 tree.write(str(p), xml_declaration=True, encoding='utf-8') 3149