[yt-svn] commit/yt: 5 new changesets
commits-noreply at bitbucket.org
Thu Sep 11 12:00:15 PDT 2014
5 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/eab553baeebb/
Changeset: eab553baeebb
Branch: yt
User: ChrisMalone
Date: 2014-09-08 22:34:17
Summary: some pep8 fixes using flycheck; thanks a lot Nathan...
Affected #: 1 file
diff -r 7e2eca2dc0645480aa7ae82597074552b64082d6 -r eab553baeebb53defa0e715ee30be4c6b512928a yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1,5 +1,5 @@
"""
-Data structures for Boxlib Codes
+Data structures for Boxlib Codes
@@ -15,10 +15,8 @@
import os
import re
-import weakref
import itertools
-from collections import defaultdict
from stat import ST_CTIME
import numpy as np
@@ -27,53 +25,46 @@
from yt.data_objects.grid_patch import AMRGridPatch
from yt.geometry.grid_geometry_handler import GridIndex
from yt.data_objects.static_output import Dataset
-from yt.utilities.definitions import \
- mpc_conversion, sec_conversion
+
from yt.utilities.parallel_tools.parallel_analysis_interface import \
parallel_root_only
from yt.utilities.lib.misc_utilities import \
get_box_grids_level
-from yt.geometry.selection_routines import \
- RegionSelector
from yt.utilities.io_handler import \
io_registry
-from yt.utilities.physical_constants import \
- cm_per_mpc
from .fields import \
BoxlibFieldInfo, \
MaestroFieldInfo, \
CastroFieldInfo
-from .io import IOHandlerBoxlib
# This is what we use to find scientific notation that might include d's
# instead of e's.
_scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
# This is the dimensions in the Cell_H file for each level
# It is different for different dimensionalities, so we make a list
-_dim_finder = [ \
+_dim_finder = [
re.compile(r"\(\((\d+)\) \((\d+)\) \(\d+\)\)$"),
re.compile(r"\(\((\d+,\d+)\) \((\d+,\d+)\) \(\d+,\d+\)\)$"),
re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")]
# This is the line that prefixes each set of data for a FAB in the FAB file
# It is different for different dimensionalities, so we make a list
_endian_regex = r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), \(([0-9 ]+)\)\)\)"
-_header_pattern = [ \
- re.compile(_endian_regex +
+_header_pattern = [
+ re.compile(_endian_regex +
r"\(\((\d+)\) \((\d+)\) \((\d+)\)\) (\d+)\n"),
- re.compile(_endian_regex +
+ re.compile(_endian_regex +
r"\(\((\d+,\d+)\) \((\d+,\d+)\) \((\d+,\d+)\)\) (\d+)\n"),
- re.compile(_endian_regex +
+ re.compile(_endian_regex +
r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")]
-
class BoxlibGrid(AMRGridPatch):
_id_offset = 0
_offset = -1
- def __init__(self, grid_id, offset, filename = None,
- index = None):
+ def __init__(self, grid_id, offset, filename=None,
+ index=None):
super(BoxlibGrid, self).__init__(grid_id, filename, index)
self._base_offset = offset
self._parent_id = []
@@ -126,7 +117,7 @@
return coords
# Override this as well, since refine_by can vary
- def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
+ def _fill_child_mask(self, child, mask, tofill, dlevel=1):
rf = self.ds.ref_factors[self.Level]
if dlevel != 1:
raise NotImplementedError
@@ -139,8 +130,10 @@
startIndex[1]:endIndex[1],
startIndex[2]:endIndex[2]] = tofill
+
class BoxlibHierarchy(GridIndex):
grid = BoxlibGrid
+
def __init__(self, ds, dataset_type='boxlib_native'):
self.dataset_type = dataset_type
self.header_filename = os.path.join(ds.output_dir, 'Header')
@@ -149,19 +142,17 @@
GridIndex.__init__(self, ds, dataset_type)
self._cache_endianness(self.grids[-1])
- #self._read_particles()
-
def _parse_index(self):
"""
read the global header file for a Boxlib plotfile output.
"""
self.max_level = self.dataset._max_level
- header_file = open(self.header_filename,'r')
+ header_file = open(self.header_filename, 'r')
self.dimensionality = self.dataset.dimensionality
_our_dim_finder = _dim_finder[self.dimensionality-1]
- DRE = self.dataset.domain_right_edge # shortcut
- DLE = self.dataset.domain_left_edge # shortcut
+ DRE = self.dataset.domain_right_edge # shortcut
+ DLE = self.dataset.domain_left_edge # shortcut
# We can now skip to the point in the file we want to start parsing.
header_file.seek(self.dataset._header_mesh_start)
@@ -190,13 +181,13 @@
if int(header_file.next()) != 0:
raise RuntimeError("INTERNAL ERROR! This should be a zero.")
- # each level is one group with ngrids on it.
- # each grid has self.dimensionality number of lines of 2 reals
+ # each level is one group with ngrids on it.
+ # each grid has self.dimensionality number of lines of 2 reals
self.grids = []
grid_counter = 0
for level in range(self.max_level + 1):
vals = header_file.next().split()
- lev, ngrids, cur_time = int(vals[0]),int(vals[1]),float(vals[2])
+ lev, ngrids = int(vals[0]), int(vals[1])
assert(lev == level)
nsteps = int(header_file.next())
for gi in range(ngrids):
@@ -232,10 +223,10 @@
for gi in range(ngrids):
# components within it
start, stop = _our_dim_finder.match(level_header_file.next()).groups()
- # fix for non-3d data
+ # fix for non-3d data
# note we append '0' to both ends b/c of the '+1' in dims below
start += ',0'*(3-self.dimensionality)
- stop += ',0'*(3-self.dimensionality)
+ stop += ',0'*(3-self.dimensionality)
start = np.array(start.split(","), dtype="int64")
stop = np.array(stop.split(","), dtype="int64")
dims = stop - start + 1
@@ -259,7 +250,7 @@
# already read the filenames above...
self.float_type = 'float64'
- def _cache_endianness(self,test_grid):
+ def _cache_endianness(self, test_grid):
"""
Cache the endianness and bytes per real of the grids by using a
test grid and assuming that all grids have the same
@@ -270,7 +261,7 @@
# open the test file & grab the header
with open(os.path.expanduser(test_grid.filename), 'rb') as f:
header = f.readline()
-
+
bpr, endian, start, stop, centering, nc = \
_header_pattern[self.dimensionality-1].search(header).groups()
# Note that previously we were using a different value for BPR than we
@@ -294,7 +285,8 @@
self.grids = np.array(self.grids, dtype='object')
self._reconstruct_parent_child()
for i, grid in enumerate(self.grids):
- if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+ if (i % 1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i,
+ self.num_grids)
grid._prepare_grid()
grid._setup_dx()
mylog.debug("Done creating grid objects")
@@ -308,10 +300,10 @@
self.grid_levels[i] + 1,
self.grid_left_edge, self.grid_right_edge,
self.grid_levels, mask)
- ids = np.where(mask.astype("bool")) # where is a tuple
- grid._children_ids = ids[0] + grid._id_offset
+ ids = np.where(mask.astype("bool")) # where is a tuple
+ grid._children_ids = ids[0] + grid._id_offset
mylog.debug("Second pass; identifying parents")
- for i, grid in enumerate(self.grids): # Second pass
+ for i, grid in enumerate(self.grids): # Second pass
for child in grid.Children:
child._parent_id.append(i + grid._id_offset)
@@ -331,10 +323,10 @@
for line in header_file:
if len(line.split()) != 3: continue
self.num_grids += int(line.split()[1])
-
+
def _initialize_grid_arrays(self):
super(BoxlibHierarchy, self)._initialize_grid_arrays()
- self.grid_start_index = np.zeros((self.num_grids,3), 'int64')
+ self.grid_start_index = np.zeros((self.num_grids, 3), 'int64')
def _initialize_state_variables(self):
"""override to not re-initialize num_grids in AMRHierarchy.__init__
@@ -349,7 +341,7 @@
self.field_list = [("boxlib", f) for f in
self.dataset._field_list]
self.field_indexes = dict((f[1], i)
- for i, f in enumerate(self.field_list))
+ for i, f in enumerate(self.field_list))
# There are times when field_list may change. We copy it here to
# avoid that possibility.
self.field_order = [f for f in self.field_list]
@@ -357,6 +349,7 @@
def _setup_data_io(self):
self.io = io_registry[self.dataset_type](self.dataset)
+
class BoxlibDataset(Dataset):
"""
This class is a stripped down class that simply reads and parses
@@ -370,10 +363,10 @@
periodicity = (True, True, True)
def __init__(self, output_dir,
- cparam_filename = "inputs",
- fparam_filename = "probin",
+ cparam_filename="inputs",
+ fparam_filename="probin",
dataset_type='boxlib_native',
- storage_filename = None):
+ storage_filename=None):
"""
The paramfile is usually called "inputs"
and there may be a fortran inputs file usually called "probin"
@@ -390,14 +383,13 @@
Dataset.__init__(self, output_dir, dataset_type)
# These are still used in a few places.
- if not "HydroMethod" in self.parameters.keys():
+ if "HydroMethod" not in self.parameters.keys():
self.parameters["HydroMethod"] = 'boxlib'
- self.parameters["Time"] = 1. # default unit is 1...
- self.parameters["EOSType"] = -1 # default
+ self.parameters["Time"] = 1. # default unit is 1...
+ self.parameters["EOSType"] = -1 # default
self.parameters["gamma"] = self.parameters.get(
"materials.gamma", 1.6667)
-
def _localize_check(self, fn):
# If the file exists, use it. If not, set it to None.
root_dir = os.path.dirname(self.output_dir)
@@ -420,11 +412,11 @@
args = inspect.getcallargs(cls.__init__, args, kwargs)
# This might need to be localized somehow
inputs_filename = os.path.join(
- os.path.dirname(os.path.abspath(output_dir)),
- args['cparam_filename'])
+ os.path.dirname(os.path.abspath(output_dir)),
+ args['cparam_filename'])
if not os.path.exists(inputs_filename) and \
not os.path.exists(jobinfo_filename):
- return True # We have no parameters to go off of
+ return True # We have no parameters to go off of
# If we do have either inputs or jobinfo, we should be deferring to a
# different frontend.
return False
@@ -466,7 +458,7 @@
self.omega_lambda = self.parameters["comoving_OmL"]
self.omega_matter = self.parameters["comoving_OmM"]
self.hubble_constant = self.parameters["comoving_h"]
- a_file = open(os.path.join(self.output_dir,'comoving_a'))
+ a_file = open(os.path.join(self.output_dir, 'comoving_a'))
line = a_file.readline().strip()
a_file.close()
self.current_redshift = 1/float(line) - 1
@@ -493,7 +485,7 @@
# So we'll try to determine this.
vals = vals.split()
if any(_scinot_finder.match(v) for v in vals):
- vals = [float(v.replace("D","e").replace("d","e"))
+ vals = [float(v.replace("D", "e").replace("d", "e"))
for v in vals]
if len(vals) == 1:
vals = vals[0]
@@ -511,22 +503,22 @@
# call readline() if we want to end up with an offset at the very end.
# Fortunately, elsewhere we don't care about the offset, so we're fine
# everywhere else using iteration exclusively.
- header_file = open(os.path.join(self.output_dir,'Header'))
+ header_file = open(os.path.join(self.output_dir, 'Header'))
self.orion_version = header_file.readline().rstrip()
n_fields = int(header_file.readline())
self._field_list = [header_file.readline().strip()
- for i in range(n_fields)]
+ for i in range(n_fields)]
self.dimensionality = int(header_file.readline())
self.current_time = float(header_file.readline())
# This is traditionally a index attribute, so we will set it, but
# in a slightly hidden variable.
- self._max_level = int(header_file.readline())
+ self._max_level = int(header_file.readline())
self.domain_left_edge = np.array(header_file.readline().split(),
dtype="float64")
self.domain_right_edge = np.array(header_file.readline().split(),
- dtype="float64")
+ dtype="float64")
ref_factors = np.array([int(i) for i in
header_file.readline().split()])
if ref_factors.size == 0:
@@ -542,26 +534,26 @@
self.refine_by = min(ref_factors)
# Check that they're all multiples of the minimum.
if not all(float(rf)/self.refine_by ==
- int(float(rf)/self.refine_by) for rf in ref_factors):
+ int(float(rf)/self.refine_by) for rf in ref_factors):
raise RuntimeError
base_log = np.log2(self.refine_by)
- self.level_offsets = [0] # level 0 has to have 0 offset
+ self.level_offsets = [0] # level 0 has to have 0 offset
lo = 0
for lm1, rf in enumerate(self.ref_factors):
lo += int(np.log2(rf) / base_log) - 1
self.level_offsets.append(lo)
- #assert(np.unique(ref_factors).size == 1)
+ # assert(np.unique(ref_factors).size == 1)
else:
self.refine_by = ref_factors[0]
self.level_offsets = [0 for l in range(self._max_level + 1)]
- # Now we read the global index space, to get
+ # Now we read the global index space, to get
index_space = header_file.readline()
# This will be of the form:
# ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
# So note that if we split it all up based on spaces, we should be
# fine, as long as we take the first two entries, which correspond to
# the root level. I'm not 100% pleased with this solution.
- root_space = index_space.replace("(","").replace(")","").split()[:2]
+ root_space = index_space.replace("(", "").replace(")", "").split()[:2]
start = np.array(root_space[0].split(","), dtype="int64")
stop = np.array(root_space[1].split(","), dtype="int64")
self.domain_dimensions = stop - start + 1
@@ -584,9 +576,9 @@
raise RuntimeError("yt does not yet support spherical geometry")
# overrides for 1/2-dimensional data
- if self.dimensionality == 1:
+ if self.dimensionality == 1:
self._setup1d()
- elif self.dimensionality == 2:
+ elif self.dimensionality == 2:
self._setup2d()
def _set_code_unit_attributes(self):
@@ -596,20 +588,20 @@
self.velocity_unit = self.quan(1.0, "cm/s")
def _setup1d(self):
-# self._index_class = BoxlibHierarchy1D
-# self._fieldinfo_fallback = Orion1DFieldInfo
+ # self._index_class = BoxlibHierarchy1D
+ # self._fieldinfo_fallback = Orion1DFieldInfo
self.domain_left_edge = \
np.concatenate([self.domain_left_edge, [0.0, 0.0]])
self.domain_right_edge = \
np.concatenate([self.domain_right_edge, [1.0, 1.0]])
tmp = self.domain_dimensions.tolist()
- tmp.extend((1,1))
+ tmp.extend((1, 1))
self.domain_dimensions = np.array(tmp)
tmp = list(self.periodicity)
tmp[1] = False
tmp[2] = False
self.periodicity = ensure_tuple(tmp)
-
+
def _setup2d(self):
self.domain_left_edge = \
np.concatenate([self.domain_left_edge, [0.0]])
@@ -638,12 +630,13 @@
offset = self.level_offsets[l1] - self.level_offsets[l0]
return self.refine_by**(l1-l0 + offset)
+
class OrionHierarchy(BoxlibHierarchy):
-
+
def __init__(self, ds, dataset_type='orion_native'):
BoxlibHierarchy.__init__(self, ds, dataset_type)
self._read_particles()
- #self.io = IOHandlerOrion
+ # self.io = IOHandlerOrion
def _read_particles(self):
"""
@@ -675,7 +668,7 @@
coord = [particle_position_x, particle_position_y, particle_position_z]
# for each particle, determine which grids contain it
# copied from object_finding_mixin.py
- mask=np.ones(self.num_grids)
+ mask = np.ones(self.num_grids)
for i in xrange(len(coord)):
np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
@@ -690,20 +683,21 @@
self.grid_particle_count[ind] += 1
self.grids[ind].NumberOfParticles += 1
return True
-
+
+
class OrionDataset(BoxlibDataset):
_index_class = OrionHierarchy
def __init__(self, output_dir,
- cparam_filename = "inputs",
- fparam_filename = "probin",
+ cparam_filename="inputs",
+ fparam_filename="probin",
dataset_type='orion_native',
- storage_filename = None):
+ storage_filename=None):
BoxlibDataset.__init__(self, output_dir,
- cparam_filename, fparam_filename, dataset_type)
-
+ cparam_filename, fparam_filename, dataset_type)
+
@classmethod
def _is_valid(cls, *args, **kwargs):
# fill our args
@@ -718,8 +712,8 @@
args = inspect.getcallargs(cls.__init__, args, kwargs)
# This might need to be localized somehow
inputs_filename = os.path.join(
- os.path.dirname(os.path.abspath(output_dir)),
- args['cparam_filename'])
+ os.path.dirname(os.path.abspath(output_dir)),
+ args['cparam_filename'])
if not os.path.exists(inputs_filename):
return False
if os.path.exists(jobinfo_filename):
@@ -732,6 +726,7 @@
if any(("geometry.prob_lo" in line for line in lines)): return True
return False
+
class CastroDataset(BoxlibDataset):
_field_info_class = CastroFieldInfo
@@ -754,6 +749,7 @@
if any(line.startswith("Castro ") for line in lines): return True
return False
+
class MaestroDataset(BoxlibDataset):
_field_info_class = MaestroFieldInfo
@@ -790,7 +786,7 @@
line = f.next()
# get the runtime parameters
for line in f:
- p, v = (_.strip() for _ in line[4:].split("=",1))
+ p, v = (_.strip() for _ in line[4:].split("=", 1))
if len(v) == 0:
self.parameters[p] = ""
else:
@@ -835,7 +831,7 @@
maxlevel = int(header.readline()) # max level
# Skip over how many grids on each level; this is degenerate
- for i in range(maxlevel + 1):dummy = header.readline()
+ for i in range(maxlevel + 1): dummy = header.readline()
grid_info = np.fromiter((int(i) for line in header.readlines()
for i in line.split()),
@@ -852,6 +848,7 @@
self.grid_particle_count[:, 0] = grid_info[:, 1]
+
class NyxDataset(BoxlibDataset):
_index_class = NyxHierarchy
@@ -874,13 +871,11 @@
pfn = os.path.join(pfname)
if not os.path.exists(pfn) or os.path.isdir(pfn): return False
nyx = any(("nyx." in line for line in open(pfn)))
- maestro = os.path.exists(os.path.join(pname, "job_info"))
- orion = (not nyx) and (not maestro)
return nyx
def _parse_parameter_file(self):
super(NyxDataset, self)._parse_parameter_file()
- #return
+ # return
# Nyx is always cosmological.
self.cosmological_simulation = 1
self.omega_lambda = self.parameters["comoving_OmL"]
@@ -914,7 +909,7 @@
v = vals.split()[0] # Just in case there are multiple; we'll go
# back afterward to using vals.
try:
- float(v.upper().replace("D","E"))
+ float(v.upper().replace("D", "E"))
except:
pcast = str
if v in ("F", "T"):
https://bitbucket.org/yt_analysis/yt/commits/f44bd8c58262/
Changeset: f44bd8c58262
Branch: yt
User: ChrisMalone
Date: 2014-09-08 22:35:23
Summary: merge
Affected #: 12 files
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -129,7 +129,14 @@
are center_of_mass and bulk_velocity. Their definitions are available in
``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that
your quantity may be of use to the general community, add it to
-``halo_quantities.py`` and issue a pull request.
+``halo_quantities.py`` and issue a pull request. Default halo quantities are:
+
+* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
+* ``particle_mass`` -- Mass of halo
+* ``particle_position_x`` -- Location of halo
+* ``particle_position_y`` -- Location of halo
+* ``particle_position_z`` -- Location of halo
+* ``virial_radius`` -- Virial radius of halo
An example of adding a quantity:
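As context for the halo_catalogs.rst hunk above, a hedged sketch of registering a custom halo quantity. The names here are illustrative, add_quantity is assumed to be the registration helper from halo_quantities.py, and the sketch assumes quantities computed earlier in the pipeline are available on halo.quantities:

    from yt.analysis_modules.halo_analysis.api import add_quantity

    def _double_rvir(halo):
        # Illustrative only: a derived quantity built from an existing one.
        return 2 * halo.quantities["virial_radius"]

    add_quantity("double_rvir", _double_rvir)

    # On a HaloCatalog the quantity is then requested by name:
    # hc.add_quantity("double_rvir")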
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -75,7 +75,8 @@
mass. In simulations where the highest-resolution particles all have the
same mass (ie: zoom-in grid based simulations), one can set up a particle
filter to select the lowest mass particles and perform the halo finding
- only on those.
+ only on those. See the this cookbook recipe for an example:
+ :ref:`cookbook-rockstar-nested-grid`.
To run the Rockstar Halo finding, you must launch python with MPI and
parallelization enabled. While Rockstar itself does not require MPI to run,
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -14,6 +14,22 @@
.. yt_cookbook:: halo_plotting.py
+.. _cookbook-rockstar-nested-grid:
+
+Running Rockstar to Find Halos on Multi-Resolution-Particle Datasets
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The version of Rockstar installed with yt does not have the capability
+to work on datasets with particles of different masses. Unfortunately,
+many simulations possess particles of different masses, notably cosmological
+zoom datasets. This recipe uses Rockstar in two different ways to generate a
+HaloCatalog from the highest resolution dark matter particles (the ones
+inside the zoom region). It then overlays some of those halos on a projection
+as a demonstration. See :ref:`halo-analysis` and :ref:`annotate-halos` for
+more information.
+
+.. yt_cookbook:: rockstar_nest.py
+
.. _cookbook-halo_finding:
Halo Profiling and Custom Analysis
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e doc/source/cookbook/power_spectrum_example.py
--- a/doc/source/cookbook/power_spectrum_example.py
+++ b/doc/source/cookbook/power_spectrum_example.py
@@ -57,7 +57,7 @@
# physical limits to the wavenumbers
kmin = np.min(1.0/L)
- kmax = np.max(0.5*dims/L)
+ kmax = np.min(0.5*dims/L)
kbins = np.arange(kmin, kmax, kmin)
N = len(kbins)
@@ -112,7 +112,6 @@
return np.abs(ru)**2
-if __name__ == "__main__":
- ds = yt.load("maestro_xrb_lores_23437")
- doit(ds)
+ds = yt.load("maestro_xrb_lores_23437")
+doit(ds)
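The kmin/kmax hunk above swaps np.max for np.min: on a grid whose axes differ in resolution or extent, the largest wavenumber sampled along every direction is set by the most restrictive per-axis Nyquist limit, and binning out to the largest limit would include k values some axes cannot represent. A quick numeric illustration (dims and L are made up):

    import numpy as np

    dims = np.array([256, 256, 64])   # cells per axis (illustrative)
    L = np.array([1.0, 1.0, 1.0])     # box size per axis

    kmin = np.min(1.0 / L)            # longest wavelength the box holds
    kmax = np.min(0.5 * dims / L)     # most restrictive Nyquist limit
    print(kmin, kmax)                 # 1.0 32.0 -- np.max would give 128.0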
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e doc/source/cookbook/rockstar_nest.py
--- /dev/null
+++ b/doc/source/cookbook/rockstar_nest.py
@@ -0,0 +1,74 @@
+# You must run this job in parallel.
+# There are several mpi flags which can be useful in order for it to work OK.
+# It requires at least 3 processors in order to run because of the way in which
+# rockstar divides up the work. Make sure you have mpi4py installed as per
+# http://yt-project.org/docs/dev/analyzing/parallel_computation.html#setting-up-parallel-yt
+
+# Usage: mpirun -np <num_procs> --mca btl ^openib python this_script.py
+
+import yt
+from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
+from yt.data_objects.particle_filters import add_particle_filter
+from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+yt.enable_parallelism() # rockstar halofinding requires parallelism
+
+# Create a dark matter particle filter
+# This will be code dependent, but this function here is true for enzo
+
+def DarkMatter(pfilter, data):
+ filter = data[("all", "particle_type")] == 1 # DM = 1, Stars = 2
+ return filter
+
+add_particle_filter("dark_matter", function=DarkMatter, filtered_type='all', \
+ requires=["particle_type"])
+
+# First, we make sure that this script is being run using mpirun with
+# at least 3 processors as indicated in the comments above.
+assert(yt.communication_system.communicators[-1].size >= 3)
+
+# Load the dataset and apply dark matter filter
+fn = "Enzo_64/DD0043/data0043"
+ds = yt.load(fn)
+ds.add_particle_filter('dark_matter')
+
+# Determine highest resolution DM particle mass in sim by looking
+# at the extrema of the dark_matter particle_mass field.
+ad = ds.all_data()
+min_dm_mass = ad.quantities.extrema(('dark_matter','particle_mass'))[0]
+
+# Define a new particle filter to isolate all highest resolution DM particles
+# and apply it to dataset
+def MaxResDarkMatter(pfilter, data):
+ return data["particle_mass"] <= 1.01 * min_dm_mass
+
+add_particle_filter("max_res_dark_matter", function=MaxResDarkMatter, \
+ filtered_type='dark_matter', requires=["particle_mass"])
+ds.add_particle_filter('max_res_dark_matter')
+
+# If desired, we can see the total number of DM and High-res DM particles
+#if yt.is_root():
+# print "Simulation has %d DM particles." % ad['dark_matter','particle_type'].shape
+# print "Simulation has %d Highest Res DM particles." % ad['max_res_dark_matter', 'particle_type'].shape
+
+# Run the halo catalog on the dataset only on the highest resolution dark matter
+# particles
+hc = HaloCatalog(data_ds=ds, finder_method='rockstar', \
+ finder_kwargs={'dm_only':True, 'particle_type':'max_res_dark_matter'})
+hc.create()
+
+# Or alternatively, just run the RockstarHaloFinder and later import the
+# output file as necessary. You can skip this step if you've already run it
+# once, but be careful since subsequent halo finds will overwrite this data.
+#rhf = RockstarHaloFinder(ds, particle_type="max_res_dark_matter")
+#rhf.run()
+# Load the halo list from a rockstar output for this dataset
+# Create a projection with the halos overplot on top
+#halos = yt.load('rockstar_halos/halos_0.0.bin')
+#hc = HaloCatalog(halos_ds=halos)
+#hc.load()
+
+# Regardless of your method of creating the halo catalog, use it to overplot the
+# halos on a projection.
+p = yt.ProjectionPlot(ds, "x", "density")
+p.annotate_halos(hc, annotate_field = 'particle_identifier', width=(10,'Mpc'), factor=2)
+p.save()
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e doc/source/cookbook/tests/test_cookbook.py
--- /dev/null
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""Module for cookbook testing
+
+
+This test should be run from main yt directory.
+
+Example:
+
+ $ sed -e '/where/d' -i nose.cfg setup.cfg
+ $ nosetests doc/source/cookbook/tests/test_cookbook.py -P -v
+"""
+import glob
+import os
+import sys
+
+sys.path.append(os.path.join(os.getcwd(), "doc/source/cookbook"))
+
+
+def test_recipe():
+ '''Dummy test grabbing all cookbook's recipes'''
+ for fname in glob.glob("doc/source/cookbook/*.py"):
+ module_name = os.path.splitext(os.path.basename(fname))[0]
+ yield check_recipe, module_name
+
+
+def check_recipe(module_name):
+ '''Run single recipe'''
+ __import__(module_name)
+ assert True
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -4,7 +4,7 @@
ds = yt.load("Enzo_64/DD0030/data0030")
# Make a projection that is the full width of the domain,
-# but only 10 Mpc in depth. This is done by creating a
+# but only 5 Mpc in depth. This is done by creating a
# region object with this exact geometry and providing it
# as a data_source for the projection.
@@ -17,12 +17,12 @@
right_corner = ds.domain_right_edge
# Now adjust the size of the region along the line of sight (x axis).
-depth = ds.quan(10.0,'Mpc')
+depth = ds.quan(5.0,'Mpc')
left_corner[0] = center[0] - 0.5 * depth
-left_corner[0] = center[0] + 0.5 * depth
+right_corner[0] = center[0] + 0.5 * depth
# Create the region
-region = ds.region(center, left_corner, right_corner)
+region = ds.box(left_corner, right_corner)
# Create a density projection and supply the region we have just created.
# Only cells within the region will be included in the projection.
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -151,19 +151,28 @@
Overplot Halo Annotations
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. function:: annotate_halos(self, halo_catalog, col='white', alpha=1, \
- width=None):
+.. function:: annotate_halos(self, halo_catalog, circle_kwargs=None, width=None, \
+ annotate_field=False, font_kwargs=None, factor=1.0):
(This is a proxy for
:class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
Accepts a :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
- and plots a circle at the location of each
- halo with the radius of the circle corresponding to the virial radius of the
- halo. If ``width`` is set to None (default) all halos are plotted.
- Otherwise, only halos that fall within a slab with width ``width`` centered
- on the center of the plot data. The color and transparency of the circles can
- be controlled with ``col`` and ``alpha`` respectively.
+ and plots a circle at the location of each halo with the radius of the
+ circle corresponding to the virial radius of the halo. If ``width`` is set
+ to None (default) all halos are plotted, otherwise it accepts a tuple in
+ the form (1.0, 'Mpc') to only display halos that fall within a slab with
+ width ``width`` centered on the center of the plot data. The appearance of
+ the circles can be changed with the circle_kwargs dictionary, which is
+ supplied to the Matplotlib patch Circle. One can label each of the halos
+ with the annotate_field, which accepts a field contained in the halo catalog
+ to add text to the plot near the halo (example: annotate_field =
+ ``particle_mass`` will write the halo mass next to each halo, whereas
+ ``particle_identifier`` shows the halo number). font_kwargs contains the
+ arguments controlling the text appearance of the annotated field.
+ Factor is the number the virial radius is multiplied by for plotting the
+ circles. Ex: factor = 2.0 will plot circles with twice the radius of each
+ halo virial radius.
.. python-script::
@@ -177,7 +186,7 @@
hc.create()
prj = yt.ProjectionPlot(data_ds, 'z', 'density')
- prj.annotate_halos(hc)
+ prj.annotate_halos(hc, annotate_field='particle_identifier')
prj.save()
Overplot a Straight Line
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e yt/data_objects/tests/test_spheres.py
--- a/yt/data_objects/tests/test_spheres.py
+++ b/yt/data_objects/tests/test_spheres.py
@@ -6,10 +6,11 @@
from yt.config import ytcfg
ytcfg["yt","__withintesting"] = "True"
+_fields_to_compare = ("spherical_r", "cylindrical_r",
+ "spherical_theta", "cylindrical_theta",
+ "spherical_phi", "cylindrical_z")
+
def test_domain_sphere():
- ds = fake_random_ds(16, fields = ("density"))
- sp = ds.sphere(ds.domain_center, ds.domain_width[0])
-
# Now we test that we can get different radial velocities based on field
# parameters.
@@ -51,3 +52,12 @@
yield assert_equal, np.any(rp0["radial_velocity"][rp0.used] ==
rp1["radial_velocity"][rp1.used]), \
False
+
+ ref_sp = ds.sphere("c", 0.25)
+ for f in _fields_to_compare:
+ ref_sp[f].sort()
+ for center in periodicity_cases(ds):
+ sp = ds.sphere(center, 0.25)
+ for f in _fields_to_compare:
+ sp[f].sort()
+ yield assert_equal, sp[f], ref_sp[f]
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -418,7 +418,7 @@
cdef np.ndarray[np.uint8_t, ndim=1] coords
cdef OctVisitorData data
self.setup_data(&data, domain_id)
- coords = np.zeros((num_cells*8), dtype="uint8")
+ coords = np.zeros((num_cells*data.nz), dtype="uint8")
data.array = <void *> coords.data
self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
return coords.astype("bool")
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -624,6 +624,18 @@
return _func
return compare_results(func)
+def periodicity_cases(ds):
+ # This is a generator that yields things near the corners. It's good for
+ # getting different places to check periodicity.
+ yield (ds.domain_left_edge + ds.domain_right_edge)/2.0
+ dx = ds.domain_width / ds.domain_dimensions
+ # We start one dx in, and only go to one in as well.
+ for i in (1, ds.domain_dimensions[0] - 2):
+ for j in (1, ds.domain_dimensions[1] - 2):
+ for k in (1, ds.domain_dimensions[2] - 2):
+ center = dx * np.array([i,j,k]) + ds.domain_left_edge
+ yield center
+
def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
import nose, os, sys, yt
from yt.funcs import mylog
diff -r eab553baeebb53defa0e715ee30be4c6b512928a -r f44bd8c582625deab1b2157ae8786db16285664e yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -907,8 +907,8 @@
class HaloCatalogCallback(PlotCallback):
"""
annotate_halos(halo_catalog, circle_kwargs=None,
- width = None, annotate_field=False,
- font_kwargs = None, factor = 1.0)
+ width = None, annotate_field = False,
+ font_kwargs=None, factor = 1.0)
Plots circles at the locations of all the halos
in a halo catalog with radii corresponding to the
@@ -935,14 +935,16 @@
region = None
_descriptor = None
- def __init__(self, halo_catalog, circle_kwargs = None,
+ def __init__(self, halo_catalog, circle_kwargs=None,
width = None, annotate_field = False,
- font_kwargs = None, factor = 1.0):
+ font_kwargs=None, factor = 1.0):
PlotCallback.__init__(self)
self.halo_catalog = halo_catalog
self.width = width
self.annotate_field = annotate_field
+ if font_kwargs is None:
+ font_kwargs = {'color':'white'}
self.font_kwargs = font_kwargs
self.factor = factor
if circle_kwargs is None:
@@ -1005,7 +1007,7 @@
if self.annotate_field:
annotate_dat = halo_data[self.annotate_field]
- texts = ['{0}'.format(dat) for dat in annotate_dat]
+ texts = ['{:g}'.format(float(dat)) for dat in annotate_dat]
for pos_x, pos_y, t in zip(px, py, texts):
plot._axes.text(pos_x, pos_y, t, **self.font_kwargs)
https://bitbucket.org/yt_analysis/yt/commits/137ae21c9876/
Changeset: 137ae21c9876
Branch: yt
User: ChrisMalone
Date: 2014-09-08 22:38:37
Summary: better check for MAESTRO data
Affected #: 1 file
diff -r f44bd8c582625deab1b2157ae8786db16285664e -r 137ae21c98766f575012a627c3634c7cfc8a9452 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -769,7 +769,7 @@
return False
# Now we check the job_info for the mention of maestro
lines = open(jobinfo_filename).readlines()
- if any("maestro" in line.lower() for line in lines): return True
+ if any(line.startswith("MAESTRO ") for line in lines): return True
return False
def _parse_parameter_file(self):
https://bitbucket.org/yt_analysis/yt/commits/c9ba2fc3898d/
Changeset: c9ba2fc3898d
Branch: yt
User: ChrisMalone
Date: 2014-09-08 23:04:27
Summary: l -> L
Affected #: 1 file
diff -r 137ae21c98766f575012a627c3634c7cfc8a9452 -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1,5 +1,5 @@
"""
-Data structures for Boxlib Codes
+Data structures for BoxLib Codes
https://bitbucket.org/yt_analysis/yt/commits/acb91ec703a3/
Changeset: acb91ec703a3
Branch: yt
User: ChrisMalone
Date: 2014-09-11 17:56:04
Summary: merge
Affected #: 52 files
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -6,12 +6,17 @@
There are several colormaps available for yt. yt includes all of the
matplotlib colormaps as well for nearly all functions. Individual visualization
functions usually allow you to specify a colormap with the ``cmap`` flag.
-There are a small number of functions (mostly contained in the image_writer
-module; e.g. write_bitmap, write_image, write_projection, etc.), which do
-not load the matplotlib infrastructure and can only access the colormaps
-native to yt.
-Here is a chart of all of the colormaps available. In addition to each
+If you have installed `brewer2mpl`
+(`pip install brewer2mpl` or see `https://github.com/jiffyclub/brewer2mpl`_),
+you can also access the discrete colormaps available on
+`http://colorbrewer2.org`_. Instead of supplying the colormap name, specify
+a tuple of the form (name, type, number), for example `('RdBu', 'Diverging', 9)`.
+These discrete colormaps will not be interpolated, and can be useful for
+creating colorblind/printer/grayscale-friendly plots. For more information,
+visit `http://colorbrewer2.org`_.
+
+Here is a chart of all of the yt and matplotlib colormaps available. In addition to each
colormap displayed here, you can access its "reverse" by simply appending a
``"_r"`` to the end of the colormap name.
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -195,7 +195,6 @@
## tau_0
tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
column_density * fval / vdop
- tau1 = tau_X * lam1cgs
tau0 = tau_X * lam0cgs
# dimensionless frequency offset in units of doppler freq
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -328,7 +328,7 @@
output["redshift"])
proper_box_size = self.simulation.box_size / \
(1.0 + output["redshift"])
- pixel_xarea = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
+ pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
mylog.info("Distance to slice = %s" % dL)
frb[field] *= factor #in erg/s/cm^2/Hz on observer"s image plane.
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/halo_analysis/fields.py
--- a/yt/analysis_modules/halo_analysis/fields.py
+++ b/yt/analysis_modules/halo_analysis/fields.py
@@ -30,7 +30,7 @@
sl_right = slice(2, None, None)
div_fac = 2.0
else:
- sl_left, sl_right, div_face = slice_info
+ sl_left, sl_right, div_fac = slice_info
def _virial_radius(field, data):
virial_radius = data.get_field_parameter("virial_radius")
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -80,7 +80,6 @@
"""
dds = halo.halo_catalog.data_ds
- hds = halo.halo_catalog.halos_ds
center = dds.arr([halo.quantities["particle_position_%s" % axis] \
for axis in "xyz"])
radius = factor * halo.quantities[radius_field]
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -788,7 +788,7 @@
# Now compute the CDM+HDM+baryon transfer functions
tf_cb = self.tf_master*self.growth_cb/self.growth_k0;
- tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0;
+ #tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0;
return tf_cb
@@ -832,7 +832,6 @@
area1 = np.sum(areas)
# Now we refine until the error is smaller than *error*.
diff = area1 - area0
- area_final = area1
area_last = area1
one_pow = 3
while diff > error:
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -32,7 +32,6 @@
contours = {}
node_ids = []
DLE = data_source.ds.domain_left_edge
- total_vol = None
selector = getattr(data_source, "base_object", data_source).selector
masks = dict((g.id, m) for g, m in data_source.blocks)
for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -128,7 +128,6 @@
energy = self.spectral_model.ebins
cell_em = EM[idxs]*vol_scale
- cell_vol = vol[idxs]*vol_scale
number_of_photons = np.zeros(dshape, dtype='uint64')
energies = []
@@ -139,7 +138,6 @@
for i, ikT in enumerate(kT_idxs):
- ncells = int(bcounts[i])
ibegin = bcell[i]
iend = ecell[i]
kT = kT_bins[ikT] + 0.5*dkT
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -490,7 +490,6 @@
z_hat = orient.unit_vectors[2]
n_ph = self.photons["NumberOfPhotons"]
- num_cells = len(n_ph)
n_ph_tot = n_ph.sum()
eff_area = None
@@ -667,7 +666,6 @@
tblhdu = hdulist["MATRIX"]
n_de = len(tblhdu.data["ENERG_LO"])
mylog.info("Number of energy bins in RMF: %d" % (n_de))
- de = tblhdu.data["ENERG_HI"] - tblhdu.data["ENERG_LO"]
mylog.info("Energy limits: %g %g" % (min(tblhdu.data["ENERG_LO"]),
max(tblhdu.data["ENERG_HI"])))
@@ -682,7 +680,6 @@
phYY = events["ypix"][eidxs]
detectedChannels = []
- pindex = 0
# run through all photon energies and find which bin they go in
k = 0
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -128,7 +128,6 @@
if fni.endswith('.fits'):
fni = fni.replace('.fits','')
- ndomains_finished = 0
for (num_halos, domain, halos) in domains_list:
dle,dre = domain
print 'exporting: '
@@ -154,7 +153,6 @@
fh.write("%6.6e \n"%(halo.Rvir*ds['kpc']))
fh.close()
export_to_sunrise(ds, fnf, star_particle_type, dle*1.0/dn, dre*1.0/dn)
- ndomains_finished +=1
def domains_from_halos(ds,halo_list,frvir=0.15):
domains = {}
@@ -172,8 +170,6 @@
domains_list = [(len(v),k,v) for k,v in domains.iteritems()]
domains_list.sort()
domains_list.reverse() #we want the most populated domains first
- domains_limits = [d[1] for d in domains_list]
- domains_halos = [d[2] for d in domains_list]
return domains_list
def prepare_octree(ds,ile,start_level=0,debug=True,dd=None,center=None):
@@ -245,10 +241,6 @@
hs = hilbert_state()
start_time = time.time()
if debug:
- if center is not None:
- c = center*ds['kpc']
- else:
- c = ile*1.0/ds.domain_dimensions*ds['kpc']
printing = lambda x: print_oct(x)
else:
printing = None
@@ -332,7 +324,7 @@
#then translate onto the subgrid integer index
parent_fle = grid.left_edges + cell_index*grid.dx
subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
- for i, (vertex,hilbert_child) in enumerate(hilbert):
+ for (vertex, hilbert_child) in hilbert:
#vertex is a combination of three 0s and 1s to
#denote each of the 8 octs
if level < 0:
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -89,8 +89,6 @@
L = 2 * R * cm_per_kpc
bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) * L
- dl = L/nz
-
ds = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
ds.index
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -418,7 +418,6 @@
otherwise Glue will be started.
"""
from glue.core import DataCollection, Data
- from glue.core.coordinates import coordinates_from_header
from glue.qt.glue_application import GlueApplication
gdata = Data(label=label)
@@ -494,6 +493,18 @@
ftype = self._current_fluid_type
if (ftype, fname) not in self.ds.field_info:
ftype = self.ds._last_freq[0]
+
+ # really ugly check to ensure that this field really does exist somewhere,
+ # in some naming convention, before returning it as a possible field type
+ if (ftype,fname) not in self.ds.field_list and \
+ fname not in self.ds.field_list and \
+ (ftype,fname) not in self.ds.derived_field_list and \
+ fname not in self.ds.derived_field_list and \
+ (ftype,fname) not in self._container_fields:
+ raise YTFieldNotFound((ftype,fname),self.ds)
+
+ # these tests are really insufficient as a field type may be valid, and the
+ # field name may be valid, but not the combination (field type, field name)
if finfo.particle_type and ftype not in self.ds.particle_types:
raise YTFieldTypeNotFound(ftype)
elif not finfo.particle_type and ftype not in self.ds.fluid_types:
@@ -621,7 +632,7 @@
fields_to_generate.append(field)
continue
fields_to_get.append(field)
- if len(fields_to_get) == 0 and fields_to_generate == 0:
+ if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
return
elif self._locked == True:
raise GenerationInProgress(fields)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -460,8 +460,6 @@
self._last_freq = field
self._last_finfo = self.field_info[(ftype, fname)]
return self._last_finfo
- if fname == self._last_freq[1]:
- return self._last_finfo
if fname in self.field_info:
# Sometimes, if guessing_type == True, this will be switched for
# the type of field it is. So we look at the field type and
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -322,10 +322,6 @@
create_magnitude_field(registry, "particle_specific_angular_momentum",
"cm**2/s", ftype=ptype, particle_type=True)
- def _particle_angular_momentum(field, data):
- return data[ptype, "particle_mass"] \
- * data[ptype, "particle_specific_angular_momentum"]
-
def _particle_angular_momentum_x(field, data):
return data[ptype, "particle_mass"] * \
data[ptype, "particle_specific_angular_momentum_x"]
@@ -350,6 +346,15 @@
units="g*cm**2/s", particle_type=True,
validators=[ValidateParameter('center')])
+ def _particle_angular_momentum(field, data):
+ return data[ptype, "particle_mass"] \
+ * data[ptype, "particle_specific_angular_momentum"]
+ registry.add_field((ptype, "particle_angular_momentum"),
+ function=_particle_angular_momentum,
+ particle_type=True,
+ units="g*cm**2/s",
+ validators=[ValidateParameter("center")])
+
create_magnitude_field(registry, "particle_angular_momentum",
"g*cm**2/s", ftype=ptype, particle_type=True)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -131,7 +131,7 @@
registry.add_field((ftype, "radial_%s" % basename),
function = _radial, units = field_units)
registry.add_field((ftype, "radial_%s_absolute" % basename),
- function = _radial, units = field_units)
+ function = _radial_absolute, units = field_units)
registry.add_field((ftype, "tangential_%s" % basename),
function=_tangential, units = field_units)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -14,11 +14,13 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-from yt.testing import *
+from yt.testing import \
+ requires_file, \
+ assert_equal
from yt.utilities.answer_testing.framework import \
requires_ds, \
- small_patch_amr, \
big_patch_amr, \
+ PixelizedProjectionValuesTest, \
data_dir_load
from yt.frontends.art.api import ARTDataset
@@ -41,3 +43,8 @@
yield PixelizedProjectionValuesTest(
d9p, axis, field, weight_field,
dobj_name)
+
+
+@requires_file(d9p)
+def test_ARTDataset():
+ assert isinstance(data_dir_load(d9p), ARTDataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -1,5 +1,5 @@
"""
-ARTIO frontend tests
+ARTIO frontend tests
@@ -24,7 +24,7 @@
from yt.frontends.artio.api import ARTIODataset
_fields = ("temperature", "density", "velocity_magnitude",
- ("deposit", "all_density"), ("deposit", "all_count"))
+ ("deposit", "all_density"), ("deposit", "all_count"))
sizmbhloz = "sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art"
@requires_ds(sizmbhloz)
@@ -45,3 +45,8 @@
s1 = dobj["ones"].sum()
s2 = sum(mask.sum() for block, mask in dobj.blocks)
yield assert_equal, s1, s2
+
+
+@requires_file(sizmbhloz)
+def test_ARTIODataset():
+ assert isinstance(data_dir_load(sizmbhloz), ARTIODataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -57,3 +57,8 @@
for test in small_patch_amr(stripping, _fields_stripping):
test_stripping.__name__ = test.description
yield test
+
+
+@requires_file(cloud)
+def test_AthenaDataset():
+ assert isinstance(data_dir_load(cloud), AthenaDataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -42,3 +42,8 @@
for test in small_patch_amr(rt, _fields):
test_radtube.__name__ = test.description
yield test
+
+
+@requires_file(rt)
+def test_OrionDataset():
+ assert isinstance(data_dir_load(rt), OrionDataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -13,15 +13,18 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-from yt.testing import *
+from yt.testing import \
+ requires_file, \
+ assert_equal
from yt.utilities.answer_testing.framework import \
requires_ds, \
small_patch_amr, \
- big_patch_amr, \
data_dir_load
-from yt.frontends.chombo.api import ChomboDataset
+from yt.frontends.chombo.api import \
+ ChomboDataset, \
+ Orion2Dataset
-_fields = ("density", "velocity_magnitude", #"velocity_divergence",
+_fields = ("density", "velocity_magnitude", # "velocity_divergence",
"magnetic_field_x")
gc = "GaussianCloud/data.0077.3d.hdf5"
@@ -49,6 +52,22 @@
def test_zp():
ds = data_dir_load(zp)
yield assert_equal, str(ds), "plt32.2d.hdf5"
- for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
+ for test in small_patch_amr(zp, _zp_fields, input_center="c",
+ input_weight="rhs"):
test_tb.__name__ = test.description
yield test
+
+
+@requires_file(zp)
+def test_ChomboDataset():
+ assert isinstance(data_dir_load(zp), ChomboDataset)
+
+
+@requires_file(gc)
+def test_Orion2Dataset():
+ assert isinstance(data_dir_load(gc), Orion2Dataset)
+
+
+#@requires_file(kho)
+#def test_PlutoDataset():
+# assert isinstance(data_dir_load(kho), PlutoDataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -42,3 +42,8 @@
for test in small_patch_amr(wt, _fields_2d):
test_wind_tunnel.__name__ = test.description
yield test
+
+
+@requires_file(wt)
+def test_FLASHDataset():
+ assert isinstance(data_dir_load(wt), FLASHDataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -56,3 +56,7 @@
for dobj_name in dso:
yield FieldValuesTest(c5, field, dobj_name)
+
+@requires_file(c5)
+def test_MoabHex8Dataset():
+ assert isinstance(data_dir_load(c5), MoabHex8Dataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -94,8 +94,9 @@
return rv
self.add_field(("gas", "temperature"), function=_temperature,
units="K")
+ self.create_cooling_fields()
- def create_cooling_fields(self, filename):
+ def create_cooling_fields(self):
num = os.path.basename(self.ds.parameter_filename).split("."
)[0].split("_")[1]
filename = "%s/cooling_%05i.out" % (
@@ -104,7 +105,7 @@
if not os.path.exists(filename): return
def _create_field(name, interp_object):
def _func(field, data):
- shape = data["Temperature"].shape
+ shape = data["temperature"].shape
d = {'lognH': np.log10(_X*data["density"]/mh).ravel(),
'logT' : np.log10(data["temperature"]).ravel()}
rv = 10**interp_object(d).reshape(shape)
@@ -131,4 +132,4 @@
interp = BilinearFieldInterpolator(tvals[n],
(avals["lognH"], avals["logT"]),
["lognH", "logT"], truncate = True)
- _create_field(n, interp)
+ _create_field(("gas", n), interp)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -1,5 +1,5 @@
"""
-RAMSES frontend tests
+RAMSES frontend tests
@@ -21,10 +21,10 @@
PixelizedProjectionValuesTest, \
FieldValuesTest, \
create_obj
-from yt.frontends.artio.api import ARTIODataset
+from yt.frontends.ramses.api import RAMSESDataset
_fields = ("temperature", "density", "velocity_magnitude",
- ("deposit", "all_density"), ("deposit", "all_count"))
+ ("deposit", "all_density"), ("deposit", "all_count"))
output_00080 = "output_00080/info_00080.txt"
@requires_ds(output_00080)
@@ -44,3 +44,8 @@
s1 = dobj["ones"].sum()
s2 = sum(mask.sum() for block, mask in dobj.blocks)
yield assert_equal, s1, s2
+
+
+@requires_file(output_00080)
+def test_RAMSESDataset():
+ assert isinstance(data_dir_load(output_00080), RAMSESDataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -53,7 +53,7 @@
_vector_fields = ("Coordinates", "Velocity", "Velocities")
_known_ptypes = ghdf5_ptypes
_var_mass = None
- _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
+ _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
'Neon', 'Magnesium', 'Silicon', 'Iron' )
@@ -81,6 +81,8 @@
f = _get_h5_handle(data_file.filename)
# This double-reads
for ptype, field_list in sorted(ptf.items()):
+ if data_file.total_particles[ptype] == 0:
+ continue
x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
@@ -96,6 +98,8 @@
for data_file in sorted(data_files):
f = _get_h5_handle(data_file.filename)
for ptype, field_list in sorted(ptf.items()):
+ if data_file.total_particles[ptype] == 0:
+ continue
g = f["/%s" % ptype]
coords = g["Coordinates"][:].astype("float64")
mask = selector.select_points(
@@ -103,11 +107,11 @@
del coords
if mask is None: continue
for field in field_list:
-
+
if field in ("Mass", "Masses") and \
ptype not in self.var_mass:
data = np.empty(mask.sum(), dtype="float64")
- ind = self._known_ptypes.index(ptype)
+ ind = self._known_ptypes.index(ptype)
data[:] = self.ds["Massarr"][ind]
elif field in self._element_names:
@@ -152,7 +156,7 @@
f = _get_h5_handle(data_file.filename)
pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
f.close()
- npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
+ npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
return npart
@@ -164,7 +168,7 @@
# loop over all keys in OWLS hdf5 file
#--------------------------------------------------
- for key in f.keys():
+ for key in f.keys():
# only want particle data
#--------------------------------------
@@ -334,7 +338,7 @@
def _count_particles(self, data_file):
npart = dict((self._ptypes[i], v)
- for i, v in enumerate(data_file.header["Npart"]))
+ for i, v in enumerate(data_file.header["Npart"]))
return npart
# header is 256, but we have 4 at beginning and end for ints
@@ -443,13 +447,13 @@
dtype = None
# We need to do some fairly ugly detection to see what format the auxiliary
# files are in. They can be either ascii or binary, and the binary files can be
- # either floats, ints, or doubles. We're going to use a try-catch cascade to
+ # either floats, ints, or doubles. We're going to use a try-catch cascade to
# determine the format.
try:#ASCII
auxdata = np.genfromtxt(filename, skip_header=1)
if auxdata.size != np.sum(data_file.total_particles.values()):
print "Error reading auxiliary tipsy file"
- raise RuntimeError
+ raise RuntimeError
except ValueError:#binary/xdr
f = open(filename, 'rb')
l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
@@ -469,7 +473,7 @@
except struct.error: # None of the binary attempts to read succeeded
print "Error reading auxiliary tipsy file"
raise RuntimeError
-
+
# Use the mask to slice out the appropriate particle type data
if mask.size == data_file.total_particles['Gas']:
return auxdata[:data_file.total_particles['Gas']]
@@ -556,14 +560,14 @@
def _update_domain(self, data_file):
'''
- This method is used to determine the size needed for a box that will
+ This method is used to determine the size needed for a box that will
bound the particles. It simply finds the largest position of the
whole set of particles, and sets the domain to +/- that value.
'''
ds = data_file.ds
ind = 0
# Check to make sure that the domain hasn't already been set
- # by the parameter file
+ # by the parameter file
if np.all(np.isfinite(ds.domain_left_edge)) and np.all(np.isfinite(ds.domain_right_edge)):
return
with open(data_file.filename, "rb") as f:
@@ -682,11 +686,11 @@
continue
field_list.append((ptype, field))
if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
- field_list += [("Gas",a) for a in self._aux_fields]
+ field_list += [("Gas",a) for a in self._aux_fields]
if any(["DarkMatter"==f[0] for f in field_list]):
- field_list += [("DarkMatter",a) for a in self._aux_fields]
+ field_list += [("DarkMatter",a) for a in self._aux_fields]
if any(["Stars"==f[0] for f in field_list]):
- field_list += [("Stars",a) for a in self._aux_fields]
+ field_list += [("Stars",a) for a in self._aux_fields]
self._field_list = field_list
return self._field_list
@@ -706,11 +710,11 @@
class IOHandlerHTTPStream(BaseIOHandler):
_dataset_type = "http_particle_stream"
_vector_fields = ("Coordinates", "Velocity", "Velocities")
-
+
def __init__(self, ds):
if requests is None:
raise RuntimeError
- self._url = ds.base_url
+ self._url = ds.base_url
# This should eventually manage the IO and cache it
self.total_bytes = 0
super(IOHandlerHTTPStream, self).__init__(ds)
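
The auxiliary-file handling above hinges on the try/except cascade
described in the comment: try ascii first, then fall back to binary
records of each width. A condensed, standalone sketch of the idea
(read_aux_sketch and its arguments are illustrative, not yt API; the
real logic lives in yt/frontends/sph/io.py and additionally slices the
result by particle type):

    import struct
    import numpy as np

    def read_aux_sketch(filename, n_particles, endian="<"):
        # Attempt ascii first; a binary file or a wrong-sized ascii file
        # falls through to the binary attempts below.
        try:
            auxdata = np.genfromtxt(filename, skip_header=1)
            if auxdata.size == n_particles:
                return auxdata
            raise ValueError
        except ValueError:
            pass
        with open(filename, "rb") as f:
            # Binary files begin with a particle count, followed by
            # float, int, or double records; try each width in turn.
            count = struct.unpack(endian + "i", f.read(4))[0]
            for fmt in ("f", "i", "d"):
                f.seek(4)
                try:
                    raw = f.read(count * struct.calcsize(fmt))
                    return np.array(struct.unpack(
                        "%s%d%s" % (endian, count, fmt), raw))
                except struct.error:
                    continue
        raise RuntimeError("Error reading auxiliary tipsy file")
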
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/sph/tests/test_owls.py
--- a/yt/frontends/sph/tests/test_owls.py
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -53,3 +53,8 @@
s1 = dobj["ones"].sum()
s2 = sum(mask.sum() for block, mask in dobj.blocks)
yield assert_equal, s1, s2
+
+
+@requires_file(os33)
+def test_OWLSDataset():
+ assert isinstance(data_dir_load(os33), OWLSDataset)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/frontends/sph/tests/test_tipsy.py
--- a/yt/frontends/sph/tests/test_tipsy.py
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -92,3 +92,8 @@
s1 = dobj["ones"].sum()
s2 = sum(mask.sum() for block, mask in dobj.blocks)
yield assert_equal, s1, s2
+
+
+@requires_file(pkdgrav)
+def test_TipsyDataset():
+ assert isinstance(data_dir_load(pkdgrav), TipsyDataset)
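
All three frontend tests added in this changeset follow the same
smoke-test shape; generically (MyDataset and some_output are
placeholders, not yt names):

    @requires_file(some_output)
    def test_MyDataset():
        # data_dir_load opens the sample dataset; the test passes when
        # the loader hands back the expected Dataset subclass.
        assert isinstance(data_dir_load(some_output), MyDataset)
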
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -270,7 +270,6 @@
api_version = get_ipython_api_version()
- stack = inspect.stack()
frame = inspect.stack()[num_up]
loc = frame[0].f_locals.copy()
glo = frame[0].f_globals
@@ -537,7 +536,6 @@
return version_info
def get_script_contents():
- stack = inspect.stack()
top_frame = inspect.stack()[-1]
finfo = inspect.getframeinfo(top_frame[0])
if finfo[2] != "<module>": return None
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -214,40 +214,37 @@
for ftype, fname in fields:
if fname in self.field_list or (ftype, fname) in self.field_list:
fields_to_read.append((ftype, fname))
+ elif fname in self.ds.derived_field_list or (ftype, fname) in self.ds.derived_field_list:
+ fields_to_generate.append((ftype, fname))
else:
- fields_to_generate.append((ftype, fname))
+ raise YTFieldNotFound((ftype,fname), self.ds)
return fields_to_read, fields_to_generate
def _read_particle_fields(self, fields, dobj, chunk = None):
if len(fields) == 0: return {}, []
+ fields_to_read, fields_to_generate = self._split_fields(fields)
+ if len(fields_to_read) == 0:
+ return {}, fields_to_generate
selector = dobj.selector
if chunk is None:
self._identify_base_chunk(dobj)
- fields_to_return = {}
- fields_to_read, fields_to_generate = self._split_fields(fields)
- if len(fields_to_read) == 0:
- return {}, fields_to_generate
fields_to_return = self.io._read_particle_selection(
self._chunk_io(dobj, cache = False),
selector,
fields_to_read)
- for field in fields_to_read:
- ftype, fname = field
- finfo = self.ds._get_field_info(*field)
return fields_to_return, fields_to_generate
def _read_fluid_fields(self, fields, dobj, chunk = None):
if len(fields) == 0: return {}, []
+ fields_to_read, fields_to_generate = self._split_fields(fields)
+ if len(fields_to_read) == 0:
+ return {}, fields_to_generate
selector = dobj.selector
if chunk is None:
self._identify_base_chunk(dobj)
chunk_size = dobj.size
else:
chunk_size = chunk.data_size
- fields_to_return = {}
- fields_to_read, fields_to_generate = self._split_fields(fields)
- if len(fields_to_read) == 0:
- return {}, fields_to_generate
fields_to_return = self.io._read_fluid_selection(
self._chunk_io(dobj),
selector,
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -81,8 +81,6 @@
for lvl in lvl_range:
#grids = self.data_source.select_grids(lvl)
grids = np.array([b for b, mask in self.data_source.blocks if b.Level == lvl])
- gids = np.array([g.id for g in grids if g.Level == lvl],
- dtype="int64")
if len(grids) == 0: continue
self.add_grids(grids)
@@ -93,7 +91,6 @@
grid = self.ds.index.grids[node.grid - self._id_offset]
dds = grid.dds
gle = grid.LeftEdge
- gre = grid.RightEdge
nle = self.ds.arr(get_left_edge(node), input_units="code_length")
nre = self.ds.arr(get_right_edge(node), input_units="code_length")
li = np.rint((nle-gle)/dds).astype('int32')
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -574,7 +574,7 @@
for newp, oldp in zip(new_result["parents"], old_result["parents"]):
assert(newp == oldp)
for newc, oldc in zip(new_result["children"], old_result["children"]):
- assert(newp == oldp)
+ assert(newc == oldc)
class SimulatedHaloMassFunctionTest(AnswerTestingTest):
_type_name = "SimulatedHaloMassFunction"
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py
+++ b/yt/utilities/answer_testing/runner.py
@@ -89,8 +89,7 @@
self.plot_tests = plot_tests
def run_all_tests(self):
- plot_list = []
- for i,name in enumerate(sorted(test_registry)):
+ for name in sorted(test_registry):
self.run_test(name)
return self.passed_tests
@@ -98,7 +97,6 @@
# We'll also need to call the "compare" operation,
# but for that we'll need a data store.
test = test_registry[name]
- plot_list = []
if test.output_type == 'single':
mot = MultipleOutputTest(self.io_log)
for i,fn in enumerate(mot):
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -34,13 +34,7 @@
""" Calculate list of product(psize) subarrays of arr, along with their
left and right edges
"""
- grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
- grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
- n_d = shape
- d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
- grid_left_edges, grid_right_edges, shapes, slices = \
- split_array(bbox[:, 0], bbox[:, 1], shape, psize)
- return grid_left_edges, grid_right_edges, shapes, slices
+ return split_array(bbox[:, 0], bbox[:, 1], shape, psize)
def evaluate_domain_decomposition(n_d, pieces, ldom):
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -147,8 +147,6 @@
for dim in range(3):
sig = self.sigs[dim]
sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
- grid_ends = np.zeros((sig.size, 2))
- ng = 0
center = int((self.flagged.shape[dim] - 1) / 2)
strength = zero_strength = 0
for i in range(1, sig.size-2):
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ b/yt/utilities/fortran_utils.py
@@ -208,7 +208,6 @@
>>> skip(f, 3)
"""
skipped = []
- pos = f.tell()
for i in range(n):
fmt = endian+"I"
size = f.read(struct.calcsize(fmt))
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/lib/tests/test_alt_ray_tracers.py
--- a/yt/utilities/lib/tests/test_alt_ray_tracers.py
+++ b/yt/utilities/lib/tests/test_alt_ray_tracers.py
@@ -14,7 +14,7 @@
def setup():
# set up some sample cylindrical grid data, radiating out from center
- global left_grid, right_grid, amr_levels, center_grid
+ global left_grid, right_grid, amr_levels, center_grid, data
np.seterr(all='ignore')
l1, r1, lvl1 = amrspace([0.0, 1.0, 0.0, -1.0, 0.0, 2*np.pi], levels=(7,7,0))
l2, r2, lvl2 = amrspace([0.0, 1.0, 0.0, 1.0, 0.0, 2*np.pi], levels=(7,7,0))
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -240,7 +240,6 @@
Return an iterator over EnzoSphere objects generated from the appropriate
columns in *filename*. Optionally specify the *unit* radius is in.
"""
- sp_list = []
for line in open(filename):
if line.startswith("#"): continue
vals = line.split()
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -207,7 +207,6 @@
def _generate_post(self):
metadata = self._attrs
- chunks = []
return (metadata, ("chunks", []))
class MinimalNotebook(MinimalRepresentation):
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -57,7 +57,7 @@
ds = self.ds
fields = [f for f in ds.field_list
if not ds.field_info[f].particle_type]
- dsields = [f for f in ds.field_list
+ pfields = [f for f in ds.field_list
if ds.field_info[f].particle_type]
# Preload is only defined for Enzo ...
if ds.index.io._dataset_type == "enzo_packed_3d":
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/spatial/setup.py
--- a/yt/utilities/spatial/setup.py
+++ b/yt/utilities/spatial/setup.py
@@ -4,8 +4,8 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
- from numpy.distutils.system_info import get_info
- from distutils.sysconfig import get_python_inc
+# from numpy.distutils.system_info import get_info
+# from distutils.sysconfig import get_python_inc
config = Configuration('spatial', parent_package, top_path)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/utilities/spatial/setupscons.py
--- a/yt/utilities/spatial/setupscons.py
+++ b/yt/utilities/spatial/setupscons.py
@@ -3,7 +3,7 @@
from os.path import join
def configuration(parent_package = '', top_path = None):
- from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
+ from numpy.distutils.misc_util import Configuration
config = Configuration('spatial', parent_package, top_path)
config.add_data_dir('tests')
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -19,6 +19,12 @@
from yt.funcs import \
get_image_suffix, mylog, iterable
import numpy as np
+try:
+ import brewer2mpl
+ has_brewer = True
+except:
+ has_brewer = False
+
class CallbackWrapper(object):
def __init__(self, viewer, window_plot, frb, field):
@@ -110,6 +116,13 @@
elif (cbnorm == 'linear'):
norm = matplotlib.colors.Normalize()
extent = [float(e) for e in extent]
+ if isinstance(cmap, tuple):
+ if has_brewer:
+ bmap = brewer2mpl.get_map(*cmap)
+ cmap = bmap.get_mpl_colormap(N=cmap[2])
+ else:
+ raise RuntimeError("Please install brewer2mpl to use colorbrewer colormaps")
+
self.image = self.axes.imshow(data.to_ndarray(), origin='lower',
extent=extent, norm=norm, vmin=self.zmin,
aspect=aspect, vmax=self.zmax, cmap=cmap)
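
Read in isolation, the new tuple handling amounts to the following
(a sketch; resolve_cmap is illustrative, while get_map and
get_mpl_colormap are the brewer2mpl calls used in the diff):

    try:
        import brewer2mpl
        has_brewer = True
    except ImportError:
        has_brewer = False

    def resolve_cmap(cmap):
        # A (name, map_type, number[, reverse]) tuple selects a
        # colorbrewer palette; plain strings pass through to
        # matplotlib unchanged.
        if isinstance(cmap, tuple):
            if not has_brewer:
                raise RuntimeError("Please install brewer2mpl to use "
                                   "colorbrewer colormaps")
            bmap = brewer2mpl.get_map(*cmap)  # e.g. ("RdBu", "diverging", 9)
            return bmap.get_mpl_colormap(N=cmap[2])
        return cmap
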
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -1013,7 +1013,6 @@
for i in range(npanels): ylabels.append("")
d = DualEPS(figsize=figsize)
- count = 0
for j in range(nrow):
invj = nrow - j - 1
ypos = invj*(figsize[1] + margins[1])
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -215,8 +215,8 @@
dy = (self.ylim[1] - self.ylim[0])/self.size[1]
my_lim = (self.xlim[0] + dx*self.start_indices[0],
self.xlim[0] + dx*(self.start_indices[0] + self.my_size[0]),
- self.ylim[0] + dx*self.start_indices[1],
- self.ylim[0] + dx*(self.start_indices[1] + self.my_size[1]))
+ self.ylim[0] + dy*self.start_indices[1],
+ self.ylim[0] + dy*(self.start_indices[1] + self.my_size[1]))
new_buffer = FixedResolutionBuffer(self.source, my_lim, self.my_size)
self._buffer = new_buffer
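
For clarity, the corrected sub-window arithmetic (names shortened from
the diff; the old code reused the x spacing dx on the y axis):

    dx = (xlim[1] - xlim[0]) / size[0]
    dy = (ylim[1] - ylim[0]) / size[1]
    my_lim = (xlim[0] + dx * start[0],
              xlim[0] + dx * (start[0] + my_size[0]),
              ylim[0] + dy * start[1],
              ylim[0] + dy * (start[1] + my_size[1]))
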
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -23,6 +23,12 @@
import yt.utilities.lib.image_utilities as au
import yt.utilities.png_writer as pw
from yt.extern.six.moves import builtins
+try:
+ import brewer2mpl
+ has_brewer = True
+except:
+ has_brewer = False
+
def scale_image(image, mi=None, ma=None):
r"""Scale an image ([NxNxM] where M = 1-4) to be uint8 and values scaled
@@ -248,7 +254,14 @@
lut = cmd.color_map_luts[cmap_name]
except KeyError:
try:
- cmap = mcm.get_cmap(cmap_name)
+ if isinstance(cmap_name, tuple):
+ if has_brewer:
+ bmap = brewer2mpl.get_map(*cmap_name)
+ cmap = bmap.get_mpl_colormap(N=cmap_name[2])
+ else:
+ raise RuntimeError("Please install brewer2mpl to use colorbrewer colormaps")
+ else:
+ cmap = mcm.get_cmap(cmap_name)
dummy = cmap(0.0)
lut = cmap._lut.T
except ValueError:
@@ -256,10 +269,19 @@
" colormap file or matplotlib colormaps"
raise KeyError(cmap_name)
- x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
- shape = buff.shape
- mapped = np.dstack(
- [(np.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
+ if isinstance(cmap_name, tuple) and has_brewer:
+ # If we are using the colorbrewer maps, don't interpolate
+ shape = buff.shape
+ # We add float_eps so that digitize doesn't go out of bounds
+ x = np.mgrid[0.0:1.0+np.finfo(np.float32).eps:lut[0].shape[0]*1j]
+ inds = np.digitize(buff.ravel(), x)
+ inds.shape = (shape[0], shape[1])
+ mapped = np.dstack([(v[inds]*255).astype('uint8') for v in lut])
+ del inds
+ else:
+ x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+ mapped = np.dstack(
+ [(np.interp(buff, x, v)*255).astype('uint8') for v in lut ])
return mapped.copy("C")
def strip_colormap_data(fn = "color_map_data.py",
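
Why the digitize branch exists: colorbrewer palettes are discrete, so
linearly interpolating between lut entries would blur their hard color
steps. The two paths, side by side (a sketch; buff is a 2-D array of
values in [0, 1] and lut a sequence of equal-length channel arrays):

    import numpy as np

    def map_discrete(buff, lut):
        # Bin each pixel onto the nearest lut entry; float eps on the
        # top edge keeps values of exactly 1.0 inside digitize's range.
        x = np.mgrid[0.0:1.0 + np.finfo(np.float32).eps:lut[0].shape[0] * 1j]
        inds = np.digitize(buff.ravel(), x).reshape(buff.shape)
        return np.dstack([(v[inds] * 255).astype("uint8") for v in lut])

    def map_interpolated(buff, lut):
        # The pre-existing path: linear interpolation between entries.
        x = np.mgrid[0.0:1.0:lut[0].shape[0] * 1j]
        return np.dstack([(np.interp(buff, x, v) * 255).astype("uint8")
                          for v in lut])
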
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -35,6 +35,7 @@
from yt.utilities.exceptions import \
YTNotInsideNotebook
+
def invalidate_data(f):
@wraps(f)
def newfunc(*args, **kwargs):
@@ -198,7 +199,7 @@
return self
@invalidate_plot
- def set_cmap(self, field, cmap_name):
+ def set_cmap(self, field, cmap):
"""set the colormap for one of the fields
Parameters
@@ -206,8 +207,11 @@
field : string
the field to set the colormap
if field == 'all', applies to all plots.
- cmap_name : string
- name of the colormap
+ cmap : string or tuple
+ If a string, will be interpreted as name of the colormap.
+ If a tuple, it is assumed to be of the form (name, type, number)
+ to be used for brewer2mpl functionality. (name, type, number, bool)
+ can be used to specify if a reverse colormap is to be used.
"""
@@ -217,7 +221,7 @@
fields = [field]
for field in self.data_source._determine_fields(fields):
self._colorbar_valid = False
- self._colormaps[field] = cmap_name
+ self._colormaps[field] = cmap
return self
@invalidate_plot
@@ -384,37 +388,6 @@
return self.set_font({'size': size})
@invalidate_plot
- def set_cmap(self, field, cmap):
- """set the colormap for one of the fields
-
- Parameters
- ----------
- field : string
- the field to set a transform
- if field == 'all', applies to all plots.
- cmap : string
- name of the colormap
-
- """
- if field == 'all':
- fields = self.plots.keys()
- else:
- fields = [field]
-
- for field in self.data_source._determine_fields(fields):
- self._colorbar_valid = False
- self._colormaps[field] = cmap
- if isinstance(cmap, types.StringTypes):
- if str(cmap) in yt_colormaps:
- cmap = yt_colormaps[str(cmap)]
- elif hasattr(matplotlib.cm, cmap):
- cmap = getattr(matplotlib.cm, cmap)
- if not is_colormap(cmap) and cmap is not None:
- raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
- self.plots[field].image.set_cmap(cmap)
- return self
-
- @invalidate_plot
@invalidate_figure
def set_figure_size(self, size):
"""Sets a new figure size for the plot
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -558,7 +558,6 @@
plot._axes.set_ylabel(self.label)
def get_smallest_appropriate_unit(v, ds):
- max_nu = 1e30
good_u = None
for unit in ['Mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
uq = YTQuantity(1.0, unit)
@@ -708,10 +707,6 @@
dxf = "d%s" % xf
dyf = "d%s" % yf
- DomainRight = plot.data.ds.domain_right_edge
- DomainLeft = plot.data.ds.domain_left_edge
- DomainWidth = DomainRight - DomainLeft
-
nx, ny = plot.image._A.shape
buff = np.zeros((nx,ny),dtype='float64')
for i,clump in enumerate(reversed(self.clumps)):
@@ -790,7 +785,6 @@
plot.data.ds.coordinates.y_axis[ax])
pos = self.pos[xi], self.pos[yi]
else: pos = self.pos
- width,height = plot.image._A.shape
x,y = self.convert_to_plot(plot, pos)
plot._axes.text(x, y, self.text, **self.text_args)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -544,18 +544,17 @@
>>> pp.save()
"""
- for i, p in enumerate(self.profiles):
- if field is 'all':
- fields = self.axes.keys()
- else:
- fields = ensure_list(field)
- for profile in self.profiles:
- for field in profile.data_source._determine_fields(fields):
- if field in profile.field_map:
- field = profile.field_map[field]
- self.axes.ylim[field] = (ymin, ymax)
- # Continue on to the next profile.
- break
+ if field is 'all':
+ fields = self.axes.keys()
+ else:
+ fields = ensure_list(field)
+ for profile in self.profiles:
+ for field in profile.data_source._determine_fields(fields):
+ if field in profile.field_map:
+ field = profile.field_map[field]
+ self.axes.ylim[field] = (ymin, ymax)
+ # Continue on to the next profile.
+ break
return self
def _get_field_log(self, field_y, profile):
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -2082,19 +2082,20 @@
center = np.array(center, dtype='float64')
if weight is not None:
# This is a temporary field, which we will remove at the end.
+ weightfield = ("index", "temp_weightfield")
def _make_wf(f, w):
def temp_weightfield(a, b):
tr = b[f].astype("float64") * b[w]
return b.apply_units(tr, a.units)
return tr
return temp_weightfield
- ds.field_info.add_field("temp_weightfield",
+ ds.field_info.add_field(weightfield,
function=_make_wf(field, weight))
# Now we have to tell the dataset to add it and to calculate
# its dependencies..
- deps, _ = ds.field_info.check_derived_fields(["temp_weightfield"])
+ deps, _ = ds.field_info.check_derived_fields([weightfield])
ds.field_dependencies.update(deps)
- fields = ["temp_weightfield", weight]
+ fields = [weightfield, weight]
nv = 12*nside**2
image = np.zeros((nv,1,4), dtype='float64', order='C')
vs = arr_pix2vec_nest(nside, np.arange(nv))
@@ -2131,8 +2132,8 @@
else:
image[:,:,0] /= image[:,:,1]
image = ds.arr(image, finfo.units)
- ds.field_info.pop("temp_weightfield")
- ds.field_dependencies.pop("temp_weightfield")
+ ds.field_info.pop(weightfield)
+ ds.field_dependencies.pop(weightfield)
return image[:,0,0]
def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
@@ -2172,20 +2173,23 @@
fields = [field]
if self.weight is not None:
- # This is a temporary field, which we will remove at the end.
+ # This is a temporary field, which we will remove at the end
+ # it is given a unique name to avoid conflicting with other
+ # class instances
+ self.weightfield = ("index", "temp_weightfield_%u"%(id(self),))
def _make_wf(f, w):
def temp_weightfield(a, b):
tr = b[f].astype("float64") * b[w]
return b.apply_units(tr, a.units)
return tr
return temp_weightfield
- ds.field_info.add_field("temp_weightfield",
+ ds.field_info.add_field(self.weightfield,
function=_make_wf(self.field, self.weight))
# Now we have to tell the dataset to add it and to calculate
# its dependencies..
- deps, _ = ds.field_info.check_derived_fields(["temp_weightfield"])
+ deps, _ = ds.field_info.check_derived_fields([self.weightfield])
ds.field_dependencies.update(deps)
- fields = ["temp_weightfield", self.weight]
+ fields = [self.weightfield, self.weight]
self.fields = fields
self.log_fields = [False]*len(self.fields)
@@ -2195,6 +2199,20 @@
north_vector=north_vector,
no_ghost=no_ghost)
+ # this would be better in an __exit__ function, but that would require
+ # changes in code that uses this class
+ def __del__(self):
+ if hasattr(self,"weightfield") and hasattr(self,"ds"):
+ try:
+ self.ds.field_info.pop(self.weightfield)
+ self.ds.field_dependencies.pop(self.weightfield)
+ except KeyError:
+ pass
+ try:
+ Camera.__del__(self)
+ except AttributeError:
+ pass
+
def get_sampler(self, args):
if self.interpolated:
sampler = InterpolatedProjectionSampler(*args)
@@ -2377,9 +2395,5 @@
no_ghost=no_ghost, interpolated=interpolated,
north_vector=north_vector)
image = projcam.snapshot()
- if weight is not None:
- ds.field_info.pop("temp_weightfield")
- ds.field_dependencies.pop("temp_weightfield")
- del projcam
return image[:,:]
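
The per-instance weight field plus __del__ cleanup boils down to this
pattern (a condensed sketch of the diff, not the full camera code):

    class TempWeightFieldMixin(object):
        def _add_weight_field(self, ds, field, weight):
            # Embed id(self) in the field name so two live instances
            # cannot clobber each other's temporary field.
            self.ds = ds
            self.weightfield = ("index", "temp_weightfield_%u" % id(self))
            def temp_weightfield(f, data):
                tr = data[field].astype("float64") * data[weight]
                return data.apply_units(tr, f.units)
            ds.field_info.add_field(self.weightfield,
                                    function=temp_weightfield)
            deps, _ = ds.field_info.check_derived_fields([self.weightfield])
            ds.field_dependencies.update(deps)

        def __del__(self):
            # Unregister on garbage collection; a missing key just means
            # the field was never added or is already gone.
            if hasattr(self, "weightfield") and hasattr(self, "ds"):
                try:
                    self.ds.field_info.pop(self.weightfield)
                    self.ds.field_dependencies.pop(self.weightfield)
                except KeyError:
                    pass
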
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -65,7 +65,7 @@
self._colormap = vvt.Colormap()
# create glsl program for this texture...
- self._program1 = program = vvt.GlslProgram()
+ self._program1 = vvt.GlslProgram()
# scale and translation transforms
self._trafo_scale = vv.Transform_Scale()
@@ -287,7 +287,7 @@
ax = vv.gca()
- for i,g in enumerate(gs):
+ for g in gs:
ss = ((g.RightEdge - g.LeftEdge) / (np.array(g.my_data[0].shape)-1)).tolist()
origin = g.LeftEdge.astype("float32").tolist()
dd = (g.my_data[0].astype("float32") - mi)/(ma - mi)
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -51,7 +51,7 @@
self.L = np.array([0.5, 0.5, 0.5])
self.W = 1.5*self.ds.domain_width
self.N = 64
- self.field = "density"
+ self.field = ("gas", "density")
def tearDown(self):
if use_tmpdir:
@@ -61,7 +61,7 @@
def setup_transfer_function(self, camera_type):
if camera_type in ['perspective', 'camera',
'stereopair', 'interactive']:
- mi, ma = self.ds.all_data().quantities['Extrema']("density")
+ mi, ma = self.ds.all_data().quantities['Extrema'](self.field)
tf = ColorTransferFunction((mi, ma),
grey_opacity=True)
tf.map_to_colormap(mi, ma, scale=10., colormap='RdBu_r')
@@ -110,7 +110,7 @@
ds = self.ds
cam = ProjectionCamera(self.c, self.L, self.W, self.N, ds=ds,
- field="density")
+ field=self.field)
cam.snapshot('projection.png')
assert_fname('projection.png')
diff -r c9ba2fc3898dba59b73c0925bfa9e040247daef9 -r acb91ec703a3768e16adeb864b3977cee6bc6779 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -403,7 +403,6 @@
>>> tf = ColorTransferFunction( (-10.0, -5.0) )
>>> tf.add_gaussian(-9.0, 0.01, [1.0, 0.0, 0.0, 1.0])
"""
- alpha = height[3]
for tf, v in zip(self.funcs, height):
tf.add_gaussian(location, width, v)
@@ -551,7 +550,6 @@
label = ''
alpha = self.alpha.y
max_alpha = alpha.max()
- norm = max_alpha
i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
i_data[:,:,0] = np.outer(self.funcs[0].y, np.ones(self.alpha.x.size))
i_data[:,:,1] = np.outer(self.funcs[1].y, np.ones(self.alpha.x.size))
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you have the commit notification service enabled
for this repository.