[yt-svn] commit/yt: 25 new changesets

commits-noreply at bitbucket.org
Mon Aug 11 11:09:43 PDT 2014


25 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/bdf282fb680a/
Changeset:   bdf282fb680a
Branch:      yt
User:        Andrew Myers
Date:        2013-05-17 23:27:47
Summary:     merging
Affected #:  3 files

diff -r 6718ba4280eb0a5439fc82b7377919b1ec51ef65 -r bdf282fb680a99114db101a9d56b27f9c965ce96 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -32,6 +32,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r 6718ba4280eb0a5439fc82b7377919b1ec51ef65 -r bdf282fb680a99114db101a9d56b27f9c965ce96 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os, sys, os.path, glob, \
+  tempfile, subprocess, shutil
 
 def check_for_png():
     # First up: HDF5_DIR in environment
@@ -97,11 +98,50 @@
     print "You can locate this by looking for the file ft2build.h"
     sys.exit(1)
 
+def check_for_openmp():
+    # Create a temporary directory
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    # Get compiler invocation
+    compiler = os.getenv('CC', 'cc')
+
+    # Attempt to compile a test script.
+    # See http://openmp.org/wp/openmp-compilers/
+    filename = r'test.c'
+    file = open(filename,'w', 0)
+    file.write(
+        "#include <omp.h>\n"
+        "#include <stdio.h>\n"
+        "int main() {\n"
+        "#pragma omp parallel\n"
+        "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
+        "}"
+        )
+    with open(os.devnull, 'w') as fnull:
+        exit_code = subprocess.call([compiler, '-fopenmp', filename],
+                                    stdout=fnull, stderr=fnull)
+        
+    # Clean up
+    file.close()
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+    if exit_code == 0:
+        return True
+    else:
+        return False
+
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('lib',parent_package,top_path)
     png_inc, png_lib = check_for_png()
     freetype_inc, freetype_lib = check_for_freetype()
+    if check_for_openmp() == True:
+        omp_args = ['-fopenmp']
+    else:
+        omp_args = None
     # Because setjmp.h is included by lots of things, and because libpng hasn't
     # always properly checked its header files (see
     # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
@@ -129,8 +169,8 @@
                 depends=["yt/utilities/lib/freetype_includes.h"])
     config.add_extension("geometry_utils", 
                 ["yt/utilities/lib/geometry_utils.pyx"],
-               extra_compile_args=['-fopenmp'],
-               extra_link_args=['-fopenmp'],
+               extra_compile_args=omp_args,
+               extra_link_args=omp_args,
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("Interpolators", 
                 ["yt/utilities/lib/Interpolators.pyx"],
@@ -194,8 +234,8 @@
                  glob.glob("yt/utilities/lib/healpix_*.c"), 
                include_dirs=["yt/utilities/lib/"],
                libraries=["m"], 
-               extra_compile_args=['-fopenmp'],
-               extra_link_args=['-fopenmp'],
+               extra_compile_args=omp_args,
+               extra_link_args=omp_args,
                depends = ["yt/utilities/lib/VolumeIntegrator.pyx",
                           "yt/utilities/lib/fp_utils.pxd",
                           "yt/utilities/lib/kdtree.h",

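The new check_for_openmp() above probes the toolchain directly: write a tiny OpenMP test program to a temporary directory, try to compile it with $CC -fopenmp, and key omp_args off the exit code. A minimal Python 3 rendering of the same probe, for reference (the name have_openmp is illustrative; unlike the committed version it avoids os.chdir() and the Python 2-only unbuffered open(..., 0), and cleans up even if the compile step raises):

    import os
    import shutil
    import subprocess
    import tempfile

    def have_openmp(compiler=None):
        """Return True if '<compiler> -fopenmp' builds a trivial OpenMP program."""
        compiler = compiler or os.getenv('CC', 'cc')
        tmpdir = tempfile.mkdtemp()
        try:
            src = os.path.join(tmpdir, 'test.c')
            with open(src, 'w') as f:
                f.write('#include <omp.h>\n'
                        'int main(void) {\n'
                        '#pragma omp parallel\n'
                        '    { }\n'
                        '    return 0;\n'
                        '}\n')
            with open(os.devnull, 'w') as devnull:
                ret = subprocess.call(
                    [compiler, '-fopenmp', src, '-o', os.path.join(tmpdir, 'a.out')],
                    stdout=devnull, stderr=devnull)
            return ret == 0
        finally:
            shutil.rmtree(tmpdir)
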
diff -r 6718ba4280eb0a5439fc82b7377919b1ec51ef65 -r bdf282fb680a99114db101a9d56b27f9c965ce96 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -23,8 +23,6 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import h5py
-try: import pyfits
-except: pass
 import numpy as np
 
 from yt.funcs import *
@@ -44,13 +42,16 @@
         f.close()
     if fits:
         try:
-            hdur = pyfits.PrimaryHDU(image[:,:,0])
-            hdug = pyfits.ImageHDU(image[:,:,1])
-            hdub = pyfits.ImageHDU(image[:,:,2])
-            hdua = pyfits.ImageHDU(image[:,:,3])
-            hdulist = pyfits.HDUList([hdur,hdug,hdub,hdua])
-            hdulist.writeto('%s.fits'%fn,clobber=True)
-        except: mylog.error('You do not have pyfits, install before attempting to use fits exporter')
+            import pyfits
+        except ImportError:
+            mylog.error('You do not have pyfits, install before attempting to use fits exporter')
+            raise
+        hdur = pyfits.PrimaryHDU(image[:,:,0])
+        hdug = pyfits.ImageHDU(image[:,:,1])
+        hdub = pyfits.ImageHDU(image[:,:,2])
+        hdua = pyfits.ImageHDU(image[:,:,3])
+        hdulist = pyfits.HDUList([hdur,hdug,hdub,hdua])
+        hdulist.writeto('%s.fits'%fn,clobber=True)
 
 def import_rgba(name, h5=True):
     """

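The exporter change above replaces a module-level "try: import pyfits / except: pass" with a deferred import inside the FITS branch, so a missing dependency is reported (and re-raised) only when FITS output is actually requested, and genuine pyfits errors are no longer swallowed by the old bare except. The pattern in isolation, as a sketch (export_rgba_fits is an illustrative name, not the function in the commit):

    def export_rgba_fits(image, fn):
        # Defer the import so the dependency only matters on this code path;
        # re-raise so the caller sees the real ImportError.
        try:
            import pyfits
        except ImportError:
            print('pyfits is required for the FITS exporter')
            raise
        hdus = [pyfits.PrimaryHDU(image[:, :, 0])]
        hdus += [pyfits.ImageHDU(image[:, :, i]) for i in range(1, image.shape[2])]
        pyfits.HDUList(hdus).writeto('%s.fits' % fn, clobber=True)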

https://bitbucket.org/yt_analysis/yt/commits/7e4e8ab97c4e/
Changeset:   7e4e8ab97c4e
Branch:      yt
User:        atmyers
Date:        2013-12-17 19:22:32
Summary:     merging
Affected #:  0 files



https://bitbucket.org/yt_analysis/yt/commits/13671958b0fa/
Changeset:   13671958b0fa
Branch:      yt
User:        atmyers
Date:        2013-12-18 00:01:52
Summary:     adding a charm reader that can at least read in the grid quantities
Affected #:  10 files

diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/frontends/charm/api.py
--- /dev/null
+++ b/yt/frontends/charm/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.charm
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      CharmGrid, \
+      CharmHierarchy, \
+      CharmStaticOutput
+
+from .fields import \
+      CharmFieldInfo, \
+      add_charm_field
+
+from .io import \
+      IOHandlerCharmHDF5

diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/frontends/charm/data_structures.py
--- /dev/null
+++ b/yt/frontends/charm/data_structures.py
@@ -0,0 +1,334 @@
+"""
+Data structures for Charm.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import re
+import os
+import weakref
+import numpy as np
+
+from collections import \
+     defaultdict
+from string import \
+     strip, \
+     rstrip
+from stat import \
+     ST_CTIME
+
+from .definitions import \
+     charm2enzoDict, \
+     yt2charmFieldsDict, \
+     parameterDict \
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+     AMRGridPatch
+from yt.data_objects.hierarchy import \
+     AMRHierarchy
+from yt.data_objects.static_output import \
+     StaticOutput
+from yt.utilities.definitions import \
+     mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     parallel_root_only
+from yt.utilities.io_handler import \
+    io_registry
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import CharmFieldInfo, KnownCharmFields
+
+class CharmGrid(AMRGridPatch):
+    _id_offset = 0
+    __slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level, start, stop):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.ActiveDimensions = stop - start + 1
+
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+
+        """
+        if self.start_index != None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return np.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+class CharmHierarchy(AMRHierarchy):
+
+    grid = CharmGrid
+
+    def __init__(self,pf,data_style='charm_hdf5'):
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.data_style = data_style
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = os.path.abspath(
+            self.parameter_file.parameter_filename)
+        self.directory = pf.fullpath
+        self._handle = pf._handle
+
+        self.float_type = self._handle['/level_0']['data:datatype=0'].dtype.name
+        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
+        AMRHierarchy.__init__(self,pf,data_style)
+        self._read_particles()
+
+    def _read_particles(self):
+        self.particle_filename = self.hierarchy_filename[:-4] + 'sink'
+        if not os.path.exists(self.particle_filename): return
+        with open(self.particle_filename, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip().split(' ')[0])
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=np.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = np.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
+
+    def _detect_fields(self):
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        self.field_list = [c[1] for c in self._handle['/'].attrs.items()[-ncomp:]]
+          
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._handle[lev]['Processors'].len()
+
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        for lev in self._levels:
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            boxes = f[lev]['boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = dx*si.astype(self.float_type)
+                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1)
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class CharmStaticOutput(StaticOutput):
+    _hierarchy_class = CharmHierarchy
+    _fieldinfo_fallback = CharmFieldInfo
+    _fieldinfo_known = KnownCharmFields
+
+    def __init__(self, filename, data_style='charm_hdf5',
+                 storage_filename = None, ini_filename = None):
+        self._handle = h5py.File(filename,'r')
+        self.current_time = self._handle.attrs['time']
+        self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
+        StaticOutput.__init__(self,filename,data_style)
+        self.storage_filename = storage_filename
+        self.cosmological_simulation = False
+
+        # These are parameters that I very much wish to get rid of.
+        self.parameters["HydroMethod"] = 'charm' # always PPM DE
+        self.parameters["DualEnergyFormalism"] = 0 
+        self.parameters["EOSType"] = -1 # default
+
+    def __del__(self):
+        self._handle.close()
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        seconds = 1 #self["Time"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = seconds / sec_conversion[unit]
+        for key in yt2charmFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    def _parse_parameter_file(self):
+        """
+        Check to see whether an 'orion2.ini' file
+        exists in the plot file directory. If one does, attempt to parse it.
+        Otherwise grab the dimensions from the hdf5 file.
+        """
+        
+        if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.domain_left_edge = self.__calc_left_edge()
+        self.domain_right_edge = self.__calc_right_edge()
+        self.domain_dimensions = self.__calc_domain_dimensions()
+        self.dimensionality = 3
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self.periodicity = (True, True, True)
+
+    def _parse_inputs_file(self, ini_filename):
+        self.fullplotdir = os.path.abspath(self.parameter_filename)
+        self.ini_filename = self._localize( \
+            self.ini_filename, ini_filename)
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        lines = open(self.ini_filename).readlines()
+        # read the file line by line, storing important parameters
+        for lineI, line in enumerate(lines):
+            try:
+                param, sep, vals = map(rstrip,line.partition(' '))
+            except ValueError:
+                mylog.error("ValueError: '%s'", line)
+            if charm2enzoDict.has_key(param):
+                paramName = charm2enzoDict[param]
+                t = map(parameterDict[paramName], vals.split())
+                if len(t) == 1:
+                    self.parameters[paramName] = t[0]
+                else:
+                    if paramName == "RefineBy":
+                        self.parameters[paramName] = t[0]
+                    else:
+                        self.parameters[paramName] = t
+
+    def __calc_left_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        return LE
+
+    def __calc_right_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        return RE
+
+    def __calc_domain_dimensions(self):
+        fileh = self._handle
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        return R_index - L_index
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = "Charm_global" in fileh["/"]
+            fileh.close()
+            return valid
+        except:
+            pass
+        return False
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)

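CharmHierarchy._parse_hierarchy encodes the on-disk layout this frontend expects: one level_N group per AMR level, each carrying a dx attribute and a boxes record array whose lo_i..lo_k/hi_i..hi_k fields give integer cell corners, from which the physical edges follow as dx*lo and dx*(hi+1). A small h5py sketch of walking that layout (assumes a file in this format; not code from the commit):

    import re
    import h5py
    import numpy as np

    def summarize_levels(filename):
        """Print dx and the physical box edges for each level_N group."""
        with h5py.File(filename, 'r') as f:
            for lev in sorted(k for k in f.keys() if k.startswith('level')):
                level = int(re.match(r'level_(\d+)', lev).group(1))
                dx = f[lev].attrs['dx']
                for box in f[lev]['boxes'][()]:
                    lo = np.array([box['lo_%s' % ax] for ax in 'ijk'])
                    hi = np.array([box['hi_%s' % ax] for ax in 'ijk'])
                    print(level, dx * lo, dx * (hi + 1))
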
diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/frontends/charm/definitions.py
--- /dev/null
+++ b/yt/frontends/charm/definitions.py
@@ -0,0 +1,54 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                                 }
+
+charm2enzoDict = {"GAMMA": "Gamma",
+                  "Ref_ratio": "RefineBy"
+                                    }
+
+yt2charmFieldsDict = {}
+charm2ytFieldsDict = {}
+

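parameterDict pairs each Enzo-style parameter name with the Python type its value should be coerced to, and charm2enzoDict translates the on-disk names; _parse_inputs_file in data_structures.py combines the two. The lookup-and-cast step, reduced to a runnable sketch with a two-entry subset of the real tables:

    parameterDict = {"Gamma": float, "RefineBy": int}
    charm2enzoDict = {"GAMMA": "Gamma", "Ref_ratio": "RefineBy"}

    def coerce(param, vals):
        """Translate a raw 'NAME val [val ...]' line into a typed parameter."""
        name = charm2enzoDict[param]
        typed = [parameterDict[name](v) for v in vals.split()]
        return name, typed[0] if len(typed) == 1 else typed

    print(coerce("GAMMA", "1.6667"))   # ('Gamma', 1.6667)
    print(coerce("Ref_ratio", "2"))    # ('RefineBy', 2)
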
diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/frontends/charm/fields.py
--- /dev/null
+++ b/yt/frontends/charm/fields.py
@@ -0,0 +1,76 @@
+"""
+Charm-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+import numpy as np
+
+KnownCharmFields = FieldInfoContainer()
+add_charm_field = KnownCharmFields.add_field
+
+CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = CharmFieldInfo.add_field
+
+add_charm_field("potential", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("potential")],
+                units=r"")
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+        
+    return _Particles
+
+_particle_field_list = ["mass",
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "velocity_x",
+                        "velocity_y",
+                        "velocity_z",
+                        "particle_id"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

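particle_func is a closure factory: each call captures one on-disk field name and returns a _Particles reader specialized to it, which the loop then registers under the matching yt field name. The capture mechanics in miniature (the real reader goes through the hierarchy's IO handler; here a plain callable stands in for it):

    import numpy as np

    def particle_func(p_field, dtype='float64'):
        # p_field is captured; every reader built here shares this code path
        # but pulls a different column.
        def _particles(read, n_particles):
            if n_particles <= 0:
                return np.array([], dtype=dtype)
            return read(p_field).astype(dtype)
        return _particles

    mass_reader = particle_func('particle_mass')
    print(mass_reader(lambda name: np.ones(3), 3))   # [1. 1. 1.]
    print(mass_reader(lambda name: np.ones(3), 0))   # []
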
diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/frontends/charm/io.py
--- /dev/null
+++ b/yt/frontends/charm/io.py
@@ -0,0 +1,133 @@
+"""
+The data-file handling functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import h5py
+import os
+import re
+import numpy as np
+
+from yt.utilities.io_handler import \
+           BaseIOHandler
+
+class IOHandlerCharmHDF5(BaseIOHandler):
+    _data_style = "charm_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+
+    _field_dict = None
+    @property
+    def field_dict(self):
+        if self._field_dict is not None:
+            return self._field_dict
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        temp =  self._handle['/'].attrs.items()[-ncomp:]
+        val, keys = zip(*temp)
+        val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
+        self._field_dict = dict(zip(keys,val))
+        return self._field_dict
+        
+    def _read_field_names(self,grid):
+        ncomp = int(self._handle['/'].attrs['num_components'])
+
+        fns = [c[1] for c in self._handle['/'].attrs.items()[-ncomp-1:-1]]
+        return fns
+    def _read_data(self,grid,field):
+
+        lstring = 'level_%i' % grid.Level
+        lev = self._handle[lstring]
+        dims = grid.ActiveDimensions
+        boxsize = dims.prod()
+        
+        grid_offset = lev[self._offset_string][grid._level_id]
+        start = grid_offset+self.field_dict[field]*boxsize
+        stop = start + boxsize
+        data = lev[self._data_string][start:stop]
+        
+        return data.reshape(dims, order='F')
+
+    def _read_particles(self, grid, field):
+        """
+        parses the Orion Star Particle text files
+             
+        """
+
+        fn = grid.pf.fullplotdir[:-4] + "sink"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+
+        # The basic fields that all sink particles have
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_id': -1}
+
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Warning - could not figure out particle output file')
+            mylog.warning('These results could be nonsense!')
+            
+        def read(line, field):
+            return float(line.strip().split(' ')[index[field]])
+
+        fn = grid.pf.fullplotdir[:-4] + "sink"
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z")
+                    if ( (grid.LeftEdge < coord).all() and
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return np.array(particles)

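_read_data above depends on the Chombo-style packing of a level's data: every component of every grid is concatenated into one flat data:datatype=0 array, so field k of a grid whose block starts at grid_offset and holds boxsize cells lives in [grid_offset + k*boxsize, grid_offset + (k+1)*boxsize), Fortran-ordered. The arithmetic in isolation:

    import numpy as np

    def read_component(flat, grid_offset, field_index, dims):
        """Slice one field of one grid out of a flat per-level array."""
        boxsize = int(np.prod(dims))
        start = grid_offset + field_index * boxsize
        block = flat[start:start + boxsize]
        return block.reshape(dims, order='F')  # Chombo blocks are Fortran-ordered

    flat = np.arange(16.0)                     # two fields of one 2x2x2 grid
    print(read_component(flat, 0, 1, (2, 2, 2)).ravel(order='F'))  # [ 8. ... 15.]
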
diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/frontends/charm/setup.py
--- /dev/null
+++ b/yt/frontends/charm/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('charm', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -321,7 +321,7 @@
         if not os.path.isfile('pluto.ini'):
             try:
                 fileh = h5py.File(args[0],'r')
-                valid = "Chombo_global" in fileh["/"]
+                valid = "Chombo_global" in fileh["/"] and "Charm_global" not in fileh["/"]
                 fileh.close()
                 return valid
             except:

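With the new frontend in place, _is_valid for Chombo now requires Chombo_global to be present and Charm_global to be absent, so a Charm file (which the Charm frontend detects via its Charm_global group, per the data_structures.py diff above) is not also claimed by the Chombo reader. The disambiguation test on its own, as a hedged sketch:

    import h5py

    def looks_like_chombo(path):
        """True for Chombo-format files that are not Charm outputs."""
        try:
            with h5py.File(path, 'r') as f:
                return "Chombo_global" in f["/"] and "Charm_global" not in f["/"]
        except (IOError, OSError):
            return False
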
diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -16,6 +16,7 @@
     config.add_subpackage("orion")
     config.add_subpackage("stream")
     config.add_subpackage("pluto")
+    config.add_subpackage("charm")
     config.add_subpackage("flash/tests")
     config.add_subpackage("enzo/tests")
     config.add_subpackage("orion/tests")

diff -r 7e4e8ab97c4e28a21cdddab24394fb9d11c39c59 -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -76,6 +76,9 @@
 from yt.frontends.chombo.api import \
     ChomboStaticOutput, ChomboFieldInfo, add_chombo_field
 
+from yt.frontends.charm.api import \
+    CharmStaticOutput, CharmFieldInfo, add_charm_field
+
 from yt.frontends.gdf.api import \
     GDFStaticOutput, GDFFieldInfo, add_gdf_field
 


https://bitbucket.org/yt_analysis/yt/commits/48e6c48cb727/
Changeset:   48e6c48cb727
Branch:      yt
User:        atmyers
Date:        2013-12-20 00:32:03
Summary:     Merged yt_analysis/yt into yt
Affected #:  1 file

diff -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 -r 48e6c48cb72731cd0e230cd9d2c57d55725814bb yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -182,10 +182,14 @@
         LE   = self.domain_left_edge
         RE   = self.domain_right_edge
 
+        # Radmc3D wants the cell wall positions in cgs. Convert here:
+        LE_cgs = LE * self.pf.units['cm']
+        RE_cgs = RE * self.pf.units['cm']
+
         # calculate cell wall positions
-        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
-        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
-        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+        xs = [str(x) for x in np.linspace(LE_cgs[0], RE_cgs[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE_cgs[1], RE_cgs[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE_cgs[2], RE_cgs[2], dims[2]+1)]
 
         # writer file header
         grid_file = open(self.grid_filename, 'w')

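The RadMC-3D fix scales the domain edges by the dataset's centimetre conversion factor before writing cell walls, since RadMC-3D expects lengths in cgs. The arithmetic standalone (the conversion factor below is an assumed example value, one code unit = 1 kpc):

    import numpy as np

    cm_per_code_length = 3.0857e21        # assumed: 1 code unit = 1 kpc
    LE = np.array([0.0, 0.0, 0.0])
    RE = np.array([1.0, 1.0, 1.0])
    dims = np.array([4, 4, 4])

    LE_cgs = LE * cm_per_code_length
    RE_cgs = RE * cm_per_code_length
    # dims[0]+1 cell-wall positions along x, in cm:
    xs = np.linspace(LE_cgs[0], RE_cgs[0], dims[0] + 1)
    print(xs)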

https://bitbucket.org/yt_analysis/yt/commits/c622b8ea3b5d/
Changeset:   c622b8ea3b5d
Branch:      yt
User:        atmyers
Date:        2014-01-14 22:58:18
Summary:     a ridiculously inefficient particle reader for CHARM
Affected #:  3 files

diff -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 -r c622b8ea3b5d5b4a0cd463281d82bf66ec1fabb6 yt/frontends/charm/data_structures.py
--- a/yt/frontends/charm/data_structures.py
+++ b/yt/frontends/charm/data_structures.py
@@ -106,32 +106,28 @@
         self._read_particles()
 
     def _read_particles(self):
-        self.particle_filename = self.hierarchy_filename[:-4] + 'sink'
-        if not os.path.exists(self.particle_filename): return
-        with open(self.particle_filename, 'r') as f:
-            lines = f.readlines()
-            self.num_stars = int(lines[0].strip().split(' ')[0])
-            for line in lines[1:]:
-                particle_position_x = float(line.split(' ')[1])
-                particle_position_y = float(line.split(' ')[2])
-                particle_position_z = float(line.split(' ')[3])
-                coord = [particle_position_x, particle_position_y, particle_position_z]
-                # for each particle, determine which grids contain it
-                # copied from object_finding_mixin.py
-                mask=np.ones(self.num_grids)
-                for i in xrange(len(coord)):
-                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = np.where(mask == 1)
-                selected_grids = self.grids[ind]
-                # in orion, particles always live on the finest level.
-                # so, we want to assign the particle to the finest of
-                # the grids we just found
-                if len(selected_grids) != 0:
-                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = np.where(self.grids == grid)[0][0]
-                    self.grid_particle_count[ind] += 1
-                    self.grids[ind].NumberOfParticles += 1
+        self.num_particles = self._handle['particles'].attrs['num_particles']
+        for i in np.arange(self.num_particles):
+            particle_position_x = self._handle['particles/position_x'][i]
+            particle_position_y = self._handle['particles/position_y'][i]
+            particle_position_z = self._handle['particles/position_z'][i]
+            coord = [particle_position_x, particle_position_y, particle_position_z]
+            # for each particle, determine which grids contain it
+            # copied from object_finding_mixin.py
+            mask=np.ones(self.num_grids)
+            for i in xrange(len(coord)):
+                np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+            ind = np.where(mask == 1)
+            selected_grids = self.grids[ind]
+            # in orion, particles always live on the finest level.
+            # so, we want to assign the particle to the finest of
+            # the grids we just found
+            if len(selected_grids) != 0:
+                grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                ind = np.where(self.grids == grid)[0][0]
+                self.grid_particle_count[ind] += 1
+                self.grids[ind].NumberOfParticles += 1
 
     def _detect_fields(self):
         ncomp = int(self._handle['/'].attrs['num_components'])

diff -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 -r c622b8ea3b5d5b4a0cd463281d82bf66ec1fabb6 yt/frontends/charm/fields.py
--- a/yt/frontends/charm/fields.py
+++ b/yt/frontends/charm/fields.py
@@ -35,6 +35,14 @@
                 validators = [ValidateDataField("potential")],
                 units=r"")
 
+add_charm_field("density", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("density")],
+                units=r"")
+
+add_charm_field("pressure", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("pressure")],
+                units=r"")
+
 def particle_func(p_field, dtype='float64'):
     def _Particles(field, data):
         io = data.hierarchy.io
@@ -55,7 +63,7 @@
                         "particle_id"]
 
 for pf in _particle_field_list:
-    pfunc = particle_func("particle_%s" % (pf))
+    pfunc = particle_func("%s" % (pf))
     add_field("particle_%s" % pf, function=pfunc,
               validators = [ValidateSpatial(0)],
               particle_type=True)

diff -r 13671958b0fa7ab5a76f175e450e8a15ed03e373 -r c622b8ea3b5d5b4a0cd463281d82bf66ec1fabb6 yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ b/yt/frontends/charm/io.py
@@ -61,73 +61,15 @@
         
         return data.reshape(dims, order='F')
 
-    def _read_particles(self, grid, field):
-        """
-        parses the Orion Star Particle text files
-             
-        """
-
-        fn = grid.pf.fullplotdir[:-4] + "sink"
-
-        # Figure out the format of the particle file
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-        line = lines[1]
-
-        # The basic fields that all sink particles have
-        index = {'particle_mass': 0,
-                 'particle_position_x': 1,
-                 'particle_position_y': 2,
-                 'particle_position_z': 3,
-                 'particle_momentum_x': 4,
-                 'particle_momentum_y': 5,
-                 'particle_momentum_z': 6,
-                 'particle_angmomen_x': 7,
-                 'particle_angmomen_y': 8,
-                 'particle_angmomen_z': 9,
-                 'particle_id': -1}
-
-        if len(line.strip().split()) == 11:
-            # these are vanilla sinks, do nothing
-            pass  
-
-        elif len(line.strip().split()) == 17:
-            # these are old-style stars, add stellar model parameters
-            index['particle_mlast']     = 10
-            index['particle_r']         = 11
-            index['particle_mdeut']     = 12
-            index['particle_n']         = 13
-            index['particle_mdot']      = 14
-            index['particle_burnstate'] = 15
-
-        elif len(line.strip().split()) == 18:
-            # these are the newer style, add luminosity as well
-            index['particle_mlast']     = 10
-            index['particle_r']         = 11
-            index['particle_mdeut']     = 12
-            index['particle_n']         = 13
-            index['particle_mdot']      = 14
-            index['particle_burnstate'] = 15
-            index['particle_luminosity']= 16
-
-        else:
-            # give a warning if none of the above apply:
-            mylog.warning('Warning - could not figure out particle output file')
-            mylog.warning('These results could be nonsense!')
-            
-        def read(line, field):
-            return float(line.strip().split(' ')[index[field]])
-
-        fn = grid.pf.fullplotdir[:-4] + "sink"
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-            particles = []
-            for line in lines[1:]:
-                if grid.NumberOfParticles > 0:
-                    coord = read(line, "particle_position_x"), \
-                            read(line, "particle_position_y"), \
-                            read(line, "particle_position_z")
-                    if ( (grid.LeftEdge < coord).all() and
-                         (coord <= grid.RightEdge).all() ):
-                        particles.append(read(line, field))
+    def _read_particles(self, grid, name):
+        num_particles = grid.pf.h.num_particles
+        particles = []
+        for i in np.arange(num_particles):
+            particle_position_x = grid.pf.h._handle['particles/position_x'][i]
+            particle_position_y = grid.pf.h._handle['particles/position_y'][i]
+            particle_position_z = grid.pf.h._handle['particles/position_z'][i]
+            coord = [particle_position_x, particle_position_y, particle_position_z]
+            if ( (grid.LeftEdge < coord).all() and
+                 (coord <= grid.RightEdge).all() ):
+                particles.append(grid.pf.h._handle['particles/' + name][i])
         return np.array(particles)

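The commit message is candid about the cost: for every requested field this reader re-reads each particle's coordinates from HDF5 one element at a time and tests containment in pure Python, so a call is O(num_particles) individual dataset reads. A vectorized sketch of the same containment test under the same assumed particles/position_* layout, reading each coordinate column once (not code from the commit):

    import numpy as np

    def particles_in_grid(handle, name, left_edge, right_edge):
        """Values of particles/<name> for the particles inside one grid."""
        pos = np.column_stack([handle['particles/position_%s' % ax][()]
                               for ax in 'xyz'])
        inside = np.all((left_edge < pos) & (pos <= right_edge), axis=1)
        return handle['particles/' + name][()][inside]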

https://bitbucket.org/yt_analysis/yt/commits/4100992bdfa2/
Changeset:   4100992bdfa2
Branch:      yt
User:        atmyers
Date:        2014-01-14 23:01:20
Summary:     merging
Affected #:  1 file

diff -r c622b8ea3b5d5b4a0cd463281d82bf66ec1fabb6 -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -182,10 +182,14 @@
         LE   = self.domain_left_edge
         RE   = self.domain_right_edge
 
+        # Radmc3D wants the cell wall positions in cgs. Convert here:
+        LE_cgs = LE * self.pf.units['cm']
+        RE_cgs = RE * self.pf.units['cm']
+
         # calculate cell wall positions
-        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
-        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
-        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+        xs = [str(x) for x in np.linspace(LE_cgs[0], RE_cgs[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE_cgs[1], RE_cgs[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE_cgs[2], RE_cgs[2], dims[2]+1)]
 
         # writer file header
         grid_file = open(self.grid_filename, 'w')


https://bitbucket.org/yt_analysis/yt/commits/9a38957b04cd/
Changeset:   9a38957b04cd
Branch:      yt
User:        atmyers
Date:        2014-02-04 21:25:49
Summary:     Merged yt_analysis/yt into yt
Affected #:  11 files

diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,9 @@
-include distribute_setup.py README* CREDITS COPYING.txt CITATION
+include distribute_setup.py README* CREDITS COPYING.txt CITATION nose.cfg
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
+recursive-include yt *.pyx *.pxd *.h README* *.glsl *.cu
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn

diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -503,10 +503,8 @@
     cd $LIB
     if [ ! -z `echo $LIB | grep h5py` ]
     then
-        shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     else
-        shift
         ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     fi
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit

diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
 import os.path
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_finding', parent_package, top_path)
@@ -12,6 +10,5 @@
     config.add_subpackage("parallel_hop")
     if os.path.exists("rockstar.cfg"):
         config.add_subpackage("rockstar")
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
+    config.make_config_py()  # installs __config__.py
     return config

diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -27,6 +27,7 @@
      cm_per_km, erg_per_keV
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
+from yt.utilities.definitions import mpc_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      op_names, parallel_capable
@@ -415,7 +416,8 @@
     def project_photons(self, L, area_new=None, exp_time_new=None, 
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
-                        sky_center=None, responses=None):
+                        sky_center=None, responses=None,
+                        convolve_energies=False):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -443,8 +445,10 @@
         sky_center : array_like, optional
             Center RA, Dec of the events in degrees.
         responses : list of strings, optional
-            The names of the ARF and RMF files to convolve the photons with.
-
+            The names of the ARF and/or RMF files to convolve the photons with.
+        convolve_energies : boolean, optional
+            If this is set, the photon energies will be convolved with the RMF.
+            
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -486,8 +490,10 @@
         parameters = {}
         
         if responses is not None:
+            responses = ensure_list(responses)
             parameters["ARF"] = responses[0]
-            parameters["RMF"] = responses[1]
+            if len(responses) == 2:
+                parameters["RMF"] = responses[1]
             area_new = parameters["ARF"]
             
         if (exp_time_new is None and area_new is None and
@@ -509,8 +515,13 @@
                 elo = f["SPECRESP"].data.field("ENERG_LO")
                 ehi = f["SPECRESP"].data.field("ENERG_HI")
                 eff_area = np.nan_to_num(f["SPECRESP"].data.field("SPECRESP"))
-                weights = self._normalize_arf(parameters["RMF"])
-                eff_area *= weights
+                if "RMF" in parameters:
+                    weights = self._normalize_arf(parameters["RMF"])
+                    eff_area *= weights
+                else:
+                    mylog.warning("You specified an ARF but not an RMF. This is ok if the "+
+                                  "responses are normalized properly. If not, you may "+
+                                  "get inconsistent results.")
                 f.close()
                 Aratio = eff_area.max()/self.parameters["FiducialArea"]
             else:
@@ -523,7 +534,11 @@
             else:
                 if redshift_new is None:
                     zobs = 0.0
-                    D_A = dist[0]*self.pf.units["kpc"]/self.pf.units[dist[1]]
+                    if dist_new[1] not in mpc_conversion:
+                        mylog.error("Please specify dist_new in one of the following units: %s",
+                                    mpc_conversion.keys())
+                        raise KeyError
+                    D_A = dist_new[0]*mpc_conversion["kpc"]/mpc_conversion[dist_new[1]]
                 else:
                     zobs = redshift_new
                     D_A = self.cosmo.AngularDiameterDistance(0.0,zobs)*1000.
@@ -609,7 +624,7 @@
             
         if comm.rank == 0: mylog.info("Total number of observed photons: %d" % (num_events))
 
-        if responses is not None:
+        if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)
             for k, v in info.items(): parameters[k] = v
                 
@@ -733,8 +748,8 @@
         self.wcs.wcs.cunit = ["deg"]*2                                                
         (self.events["xsky"],
          self.events["ysky"]) = \
-         self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"],
-                                1, ra_dec_order=True)
+         self.wcs.wcs_pix2world(self.events["xpix"],
+                                self.events["ypix"], 1)
 
     def keys(self):
         return self.events.keys()

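The photon_simulator changes make responses accept either an ARF alone or an (ARF, RMF) pair, warn when only an ARF is given, and gate the RMF convolution behind the new convolve_energies flag. The unpacking step reduces to (a sketch; unpack_responses is an illustrative name):

    def unpack_responses(responses):
        """Accept a single ARF path or an (ARF, RMF) pair."""
        responses = [responses] if isinstance(responses, str) else list(responses)
        parameters = {"ARF": responses[0]}
        if len(responses) == 2:
            parameters["RMF"] = responses[1]
        return parameters

    print(unpack_responses("acis.arf"))            # {'ARF': 'acis.arf'}
    print(unpack_responses(("a.arf", "a.rmf")))    # {'ARF': 'a.arf', 'RMF': 'a.rmf'}
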
diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,12 +1,10 @@
 #!/usr/bin/env python
-import setuptools
 
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('analysis_modules', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("coordinate_transformation")
     config.add_subpackage("cosmological_observation")
@@ -16,10 +14,15 @@
     config.add_subpackage("halo_profiler")
     config.add_subpackage("hierarchy_subset")
     config.add_subpackage("level_sets")
+    config.add_subpackage("particle_trajectories")
+    config.add_subpackage("photon_simulator")
     config.add_subpackage("radial_column_density")
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
-    config.add_subpackage("sunyaev_zeldovich")    
+    config.add_subpackage("sunrise_export")
+    config.add_subpackage("sunyaev_zeldovich")
+    config.add_subpackage("particle_trajectories")
+    config.add_subpackage("photon_simulator")
     return config

diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -319,7 +319,11 @@
         obj = self.get_data("/Objects", name)
         if obj is None:
             return
-        obj = cPickle.loads(obj.value)
+        if isinstance(obj, np.ndarray):
+            obj = obj.tostring()
+        elif hasattr(obj, 'value'):
+            obj = obj.value
+        obj = cPickle.loads(obj)
         if iterable(obj) and len(obj) == 2:
             obj = obj[1] # Just the object, not the pf
         if hasattr(obj, '_fix_pickle'): obj._fix_pickle()

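The hierarchy change normalizes the two shapes a stored pickle can come back as: a raw ndarray of bytes or a dataset-like object exposing .value. The same normalization in modern form (using pickle in place of the Python 2 cPickle of the source, and tobytes() for the deprecated tostring()):

    import pickle
    import numpy as np

    def loads_flexible(obj):
        """Unpickle a payload that may be bytes, an ndarray, or a dataset."""
        if isinstance(obj, np.ndarray):
            obj = obj.tobytes()
        elif hasattr(obj, 'value'):
            obj = obj.value
        return pickle.loads(obj)

    payload = np.frombuffer(pickle.dumps({'a': 1}), dtype='uint8')
    print(loads_flexible(payload))    # {'a': 1}
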
diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -15,6 +15,7 @@
 
 import numpy as np
 
+from yt.config import ytcfg
 from yt.funcs import *
 from yt.utilities.lib import \
     get_box_grids_level, \
@@ -54,7 +55,19 @@
 
     def find_max_cell_location(self, field, finest_levels = 3):
         if finest_levels is not False:
-            gi = (self.grid_levels >= self.max_level - finest_levels).ravel()
+            # This prevents bad values for the case that the number of grids to
+            # search is smaller than the number of processors being applied to
+            # the task, by widening the level search until enough grids are found.
+            nproc = ytcfg.getint("yt", "__topcomm_parallel_size")
+            while 1:
+                gi = (self.grid_levels >= self.max_level - finest_levels).ravel()
+                if gi.sum() >= nproc:
+                    break
+                elif finest_levels >= self.max_level:
+                    raise YTTooParallel
+                else:
+                    finest_levels += 1
+                
             source = self.grid_collection([0.0]*3, self.grids[gi])
         else:
             source = self.all_data()

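The guard added to find_max_cell_location widens the searched level window until it covers at least as many grids as there are participating processors, raising YTTooParallel only once the whole hierarchy has been swept. Its control flow, isolated with plain arrays (a sketch; a generic exception stands in for yt's):

    import numpy as np

    def pick_levels(grid_levels, max_level, finest_levels, nproc):
        """Widen the level range until it selects at least nproc grids."""
        while True:
            gi = (grid_levels >= max_level - finest_levels).ravel()
            if gi.sum() >= nproc:
                return gi
            if finest_levels >= max_level:
                raise RuntimeError("too many processors for this dataset")
            finest_levels += 1

    levels = np.array([0, 0, 1, 1, 2])
    gi = pick_levels(levels, max_level=2, finest_levels=1, nproc=4)
    print(gi.sum())    # 5: the window grew to include the level-0 grids
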
diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -210,6 +210,10 @@
 class YTEmptyProfileData(Exception):
     pass
 
+class YTTooParallel(YTException):
+    def __str__(self):
+        return "You've used too many processors for this dataset."
+
 class YTDuplicateFieldInProfile(Exception):
     def __init__(self, field, new_spec, old_spec):
         self.field = field

diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -31,6 +31,7 @@
 from .profile_plotter import PhasePlot
 from .plot_modifications import get_smallest_appropriate_unit
 
+
 class DualEPS(object):
     def __init__(self, figsize=(12,12)):
         r"""Initializes the DualEPS class to which we can progressively add layers
@@ -540,6 +541,12 @@
 
         # Scale the colorbar
         shift = (0.5*(1.0-shrink[0])*size[0], 0.5*(1.0-shrink[1])*size[1])
+        # To facilitate stretching rather than shrinking
+        # If stretched in both directions (makes no sense?) then y dominates. 
+        if(shrink[0] > 1.0):
+            shift = (0.05*self.figsize[0], 0.5*(1.0-shrink[1])*size[1])
+        if(shrink[1] > 1.0):
+            shift = (0.5*(1.0-shrink[0])*size[0], 0.05*self.figsize[1])
         size = (size[0] * shrink[0], size[1] * shrink[1])
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
@@ -694,6 +701,59 @@
 
 #=============================================================================
 
+    def arrow(self, size=0.2, label="", loc=(0.05,0.08), labelloc="top",
+              color=pyx.color.cmyk.white,
+              linewidth=pyx.style.linewidth.normal):
+        r"""Draws an arrow in the current figure
+
+        Parameters
+        ----------
+        size : float
+            Length of arrow (base to tip) in units of the figure size.
+        label : string
+            Annotation label of the arrow.
+        loc : tuple of floats
+            Location of the left hand side of the arrow in units of
+            the figure size.
+        labelloc : string
+            Location of the label with respect to the line.  Can be
+            "top" or "bottom"
+        color : `pyx.color.*.*`
+            Color of the arrow.  Example: pyx.color.cymk.white
+        linewidth : `pyx.style.linewidth.*`
+            Width of the arrow.  Example: pyx.style.linewidth.normal
+
+        Examples
+        --------
+        >>> d = DualEPS()
+        >>> d.axis_box(xrange=(0,100), yrange=(1e-3,1), ylog=True)
+        >>> d.insert_image("arrow_image.jpg")
+        >>> d.arrow(size=0.2, label="Black Hole!", loc=(0.05, 0.1))
+        >>> d.save_fig()
+        """
+        line = pyx.path.line(self.figsize[0]*loc[0],
+                             self.figsize[1]*loc[1],
+                             self.figsize[0]*(loc[0]+size),
+                             self.figsize[1]*loc[1])
+        self.canvas.stroke(line, [linewidth, color, pyx.deco.earrow()])
+       
+
+        if labelloc == "bottom":
+            yoff = -0.1*size
+            valign = pyx.text.valign.top
+        else:
+            yoff = +0.1*size
+            valign = pyx.text.valign.bottom
+        if label != "":
+            self.canvas.text(self.figsize[0]*(loc[0]+0.5*size),
+                             self.figsize[1]*(loc[1]+yoff), label,
+                             [color, valign, pyx.text.halign.center])
+
+        
+
+
+#=============================================================================
+
     def scale_line(self, size=0.2, label="", loc=(0.05,0.08), labelloc="top",
                    color=pyx.color.cmyk.white,
                    linewidth=pyx.style.linewidth.normal):
@@ -724,6 +784,7 @@
         >>> d.scale_line(size=0.2, label="1 kpc", loc=(0.05, 0.1))
         >>> d.save_fig()
         """
+        
         line = pyx.path.line(self.figsize[0]*loc[0],
                              self.figsize[1]*loc[1],
                              self.figsize[0]*(loc[0]+size),
@@ -937,7 +998,8 @@
     d = DualEPS(figsize=figsize)
     count = 0
     for j in range(nrow):
-        ypos = j*(figsize[1] + margins[1])
+        invj = nrow - j - 1
+        ypos = invj*(figsize[1] + margins[1])
         for i in range(ncol):
             xpos = i*(figsize[0] + margins[0])
             index = j*ncol + i
@@ -1003,7 +1065,8 @@
             100.0 * d.canvas.bbox().bottom().t,
             100.0 * d.canvas.bbox().top().t - d.figsize[1])
     for j in range(nrow):
-        ypos0 = j*(figsize[1] + margins[1])
+        invj = nrow - j - 1
+        ypos0 = invj*(figsize[1] + margins[1])
         for i in range(ncol):
             xpos0 = i*(figsize[0] + margins[0])
             index = j*ncol + i
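
The two ypos hunks above flip the row order in multiplot so that panel
index 0 lands at the top-left instead of the bottom-left. A minimal
standalone sketch of the inversion, with assumed panel geometry (not yt
code):

    nrow, figheight, vmargin = 3, 12.0, 0.5
    for j in range(nrow):
        invj = nrow - j - 1                  # row 0 becomes the top row
        ypos = invj * (figheight + vmargin)  # j=0 -> 25.0, j=2 -> 0.0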

diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1149,8 +1149,6 @@
         self.set_axes_unit(axes_unit)
 
     def _recreate_frb(self):
-        if self._frb is not None:
-            raise NotImplementedError
         super(OffAxisProjectionPlot, self)._recreate_frb()
 
 _metadata_template = """

diff -r 4100992bdfa286fce6b9e0e93dcc0487fa453a26 -r 9a38957b04cd627b1d575524d44409a281bbf955 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -641,6 +641,7 @@
                   [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
                 for label in labels:
                     label.set_color(self._font_color)
+        self._plot_valid = True
 
     def save(self, name=None, mpl_kwargs=None):
         r"""


https://bitbucket.org/yt_analysis/yt/commits/517b2518f094/
Changeset:   517b2518f094
Branch:      yt
User:        atmyers
Date:        2014-02-05 19:55:03
Summary:     a more sensible particle reader
Affected #:  3 files

diff -r 9a38957b04cd627b1d575524d44409a281bbf955 -r 517b2518f0946139f5ef72310826da788f061f9d yt/frontends/charm/data_structures.py
--- a/yt/frontends/charm/data_structures.py
+++ b/yt/frontends/charm/data_structures.py
@@ -106,32 +106,24 @@
         self._read_particles()
 
     def _read_particles(self):
-        self.num_particles = self._handle['particles'].attrs['num_particles']
-        for i in np.arange(self.num_particles):
-            particle_position_x = self._handle['particles/position_x'][i]
-            particle_position_y = self._handle['particles/position_y'][i]
-            particle_position_z = self._handle['particles/position_z'][i]
-            coord = [particle_position_x, particle_position_y, particle_position_z]
-            # for each particle, determine which grids contain it
-            # copied from object_finding_mixin.py
-            mask=np.ones(self.num_grids)
-            for i in xrange(len(coord)):
-                np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-            ind = np.where(mask == 1)
-            selected_grids = self.grids[ind]
-            # in orion, particles always live on the finest level.
-            # so, we want to assign the particle to the finest of
-            # the grids we just found
-            if len(selected_grids) != 0:
-                grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                ind = np.where(self.grids == grid)[0][0]
-                self.grid_particle_count[ind] += 1
-                self.grids[ind].NumberOfParticles += 1
+        
+        self.num_particles = 0
+        for key, val in self._handle.items():
+            if key.startswith('level'):
+                self.num_particles += val['particles:offsets'][:].sum()
+
+        particles_per_grid = self._handle["level_0/particles:offsets"]
+        for i, grid in enumerate(self.grids):
+            self.grids[i].NumberOfParticles = particles_per_grid[i]
+            self.grid_particle_count[i] = particles_per_grid[i]
+
+        assert(self.num_particles == self.grid_particle_count.sum())
 
     def _detect_fields(self):
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        self.field_list = [c[1] for c in self._handle['/'].attrs.items()[-ncomp:]]
+        self.field_list = []
+        for key, val in self._handle.attrs.items():
+            if key.startswith("component"):
+                self.field_list.append(val)
           
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -200,7 +192,7 @@
     def __init__(self, filename, data_style='charm_hdf5',
                  storage_filename = None, ini_filename = None):
         self._handle = h5py.File(filename,'r')
-        self.current_time = self._handle.attrs['time']
+        self.current_time = self._handle['level_0'].attrs['time']
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
         StaticOutput.__init__(self,filename,data_style)

diff -r 9a38957b04cd627b1d575524d44409a281bbf955 -r 517b2518f0946139f5ef72310826da788f061f9d yt/frontends/charm/fields.py
--- a/yt/frontends/charm/fields.py
+++ b/yt/frontends/charm/fields.py
@@ -39,9 +39,10 @@
                 validators = [ValidateDataField("density")],
                 units=r"")
 
-add_charm_field("pressure", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("pressure")],
-                units=r"")
+def _Density(field, data):
+    return data["density"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
 
 def particle_func(p_field, dtype='float64'):
     def _Particles(field, data):
@@ -60,7 +61,9 @@
                         "velocity_x",
                         "velocity_y",
                         "velocity_z",
-                        "particle_id"]
+                        "acceleration_x",
+                        "acceleration_y"
+                        "acceleration_z"]
 
 for pf in _particle_field_list:
     pfunc = particle_func("%s" % (pf))

diff -r 9a38957b04cd627b1d575524d44409a281bbf955 -r 517b2518f0946139f5ef72310826da788f061f9d yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ b/yt/frontends/charm/io.py
@@ -35,16 +35,16 @@
     def field_dict(self):
         if self._field_dict is not None:
             return self._field_dict
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        temp =  self._handle['/'].attrs.items()[-ncomp:]
-        val, keys = zip(*temp)
-        val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
-        self._field_dict = dict(zip(keys,val))
+        field_dict = {}
+        for key, val in self._handle.attrs.items():
+            if key.startswith('component_'):
+                comp_number = int(re.match('component_(\d)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._field_dict = field_dict
         return self._field_dict
         
-    def _read_field_names(self,grid):
+    def _read_field_names(self, grid):
         ncomp = int(self._handle['/'].attrs['num_components'])
-
         fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
@@ -62,14 +62,28 @@
         return data.reshape(dims, order='F')
 
     def _read_particles(self, grid, name):
-        num_particles = grid.pf.h.num_particles
-        particles = []
-        for i in np.arange(num_particles):
-            particle_position_x = grid.pf.h._handle['particles/position_x'][i]
-            particle_position_y = grid.pf.h._handle['particles/position_y'][i]
-            particle_position_z = grid.pf.h._handle['particles/position_z'][i]
-            coord = [particle_position_x, particle_position_y, particle_position_z]
-            if ( (grid.LeftEdge < coord).all() and
-                 (coord <= grid.RightEdge).all() ):
-                particles.append(grid.pf.h._handle['particles/' + name][i])
-        return np.array(particles)
+
+        particle_field_index = {'position_x': 0,
+                                'position_y': 1,
+                                'position_z': 2,
+                                'velocity_x': 3,
+                                'velocity_y': 4,
+                                'velocity_z': 5,
+                                'mass': 6,
+                                'acceleration_x': 7,
+                                'acceleration_y': 8,
+                                'acceleration_z': 9}
+
+        field_index = particle_field_index[name]
+
+        particles_per_cell = self._handle['level_0/particles:offsets'].value
+        items_per_particle = len(particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_cell)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+        
+    
+        data = self._handle['level_0/particles:data'][offsets[grid.id]:offsets[grid.id + 1]]
+        return data[field_index::items_per_particle]
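
The reader above assumes each level stores a single flat 'particles:data'
array in which all fields of one particle are contiguous, with
'particles:offsets' giving the particle count per grid. A toy
reconstruction of that decoding (assumed layout, not yt code):

    import numpy as np

    items_per_particle = 10                    # fields per particle
    particles_per_grid = np.array([2, 0, 3])   # stand-in for 'particles:offsets'

    offsets = items_per_particle * np.cumsum(particles_per_grid)
    offsets = np.append(np.array([0]), offsets).astype(np.int64)

    data = np.arange(offsets[-1], dtype='float64')  # stand-in for 'particles:data'
    grid_id, field_index = 2, 1                     # e.g. 'position_y'
    chunk = data[offsets[grid_id]:offsets[grid_id + 1]]
    one_field = chunk[field_index::items_per_particle]  # one value per particle
    assert one_field.size == particles_per_grid[grid_id]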


https://bitbucket.org/yt_analysis/yt/commits/668a39de9efe/
Changeset:   668a39de9efe
Branch:      yt
User:        atmyers
Date:        2014-02-05 22:21:40
Summary:     Merged yt_analysis/yt into yt
Affected #:  1 file

diff -r 517b2518f0946139f5ef72310826da788f061f9d -r 668a39de9efeb35333cb2ec9de0c7b549a168d53 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -349,6 +349,8 @@
         if len(self.parameters) == 0:
             self._parse_parameter_file()
         self.conversion_factors = defaultdict(lambda: 1.0)    
+        if "EOSType" not in self.parameters:
+            self.parameters["EOSType"] = -1
         if self.specified_parameters.has_key("LengthUnits") :
             self._setup_getunits_units()
         else :
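
The new guard simply supplies a default when the Athena file does not
specify an equation of state; dict.setdefault would express the same idea
in one line (a sketch, not the committed change):

    parameters = {}
    parameters.setdefault("EOSType", -1)
    assert parameters["EOSType"] == -1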


https://bitbucket.org/yt_analysis/yt/commits/3a500ecfb6ac/
Changeset:   3a500ecfb6ac
Branch:      yt
User:        atmyers
Date:        2014-02-06 22:35:08
Summary:     forgot to check in a couple of fixes to the particle reader
Affected #:  2 files

diff -r 668a39de9efeb35333cb2ec9de0c7b549a168d53 -r 3a500ecfb6ac2ee49c729e65fd0f92bb96cf0529 yt/frontends/charm/fields.py
--- a/yt/frontends/charm/fields.py
+++ b/yt/frontends/charm/fields.py
@@ -62,7 +62,7 @@
                         "velocity_y",
                         "velocity_z",
                         "acceleration_x",
-                        "acceleration_y"
+                        "acceleration_y",
                         "acceleration_z"]
 
 for pf in _particle_field_list:
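
The missing comma fixed above triggers Python's implicit string
concatenation: adjacent string literals fuse silently, so the old list
held "acceleration_yacceleration_z" instead of two separate entries.

    fields = ["acceleration_x",
              "acceleration_y"
              "acceleration_z"]
    assert fields == ["acceleration_x", "acceleration_yacceleration_z"]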

diff -r 668a39de9efeb35333cb2ec9de0c7b549a168d53 -r 3a500ecfb6ac2ee49c729e65fd0f92bb96cf0529 yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ b/yt/frontends/charm/io.py
@@ -69,10 +69,10 @@
                                 'velocity_x': 3,
                                 'velocity_y': 4,
                                 'velocity_z': 5,
-                                'mass': 6,
-                                'acceleration_x': 7,
-                                'acceleration_y': 8,
-                                'acceleration_z': 9}
+                                'acceleration_x': 6,
+                                'acceleration_y': 7,
+                                'acceleration_z': 8,
+                                'mass': 9}
 
         field_index = particle_field_index[name]
 


https://bitbucket.org/yt_analysis/yt/commits/60a931c22405/
Changeset:   60a931c22405
Branch:      yt
User:        atmyers
Date:        2014-03-18 00:16:56
Summary:     adding support for dimensions < 3
Affected #:  3 files

diff -r 3a500ecfb6ac2ee49c729e65fd0f92bb96cf0529 -r 60a931c22405bb0f5ea47349cd59a28eb26293bc yt/frontends/charm/data_structures.py
--- a/yt/frontends/charm/data_structures.py
+++ b/yt/frontends/charm/data_structures.py
@@ -48,7 +48,10 @@
 
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
-from .fields import CharmFieldInfo, KnownCharmFields
+from .fields import \
+    CharmFieldInfo, Charm2DFieldInfo, Charm1DFieldInfo, \
+    add_charm_field, add_charm_2d_field, add_charm_1d_field, \
+    KnownCharmFields
 
 class CharmGrid(AMRGridPatch):
     _id_offset = 0
@@ -92,6 +95,12 @@
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
         self.data_style = data_style
+
+        if pf.dimensionality == 1:
+            self.data_style = "charm1d_hdf5"
+        if pf.dimensionality == 2:
+            self.data_style = "charm2d_hdf5"
+
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
@@ -141,20 +150,38 @@
         grids = []
         self.dds_list = []
         i = 0
-        for lev in self._levels:
+        D = self.parameter_file.dimensionality
+        for lev_index, lev in enumerate(self._levels):
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
             self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
             for level_id, box in enumerate(boxes):
-                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
                 pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
                 grids.append(pg)
                 grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type)
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1)
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
@@ -242,62 +269,48 @@
         return f
 
     def _parse_parameter_file(self):
-        """
-        Check to see whether an 'orion2.ini' file
-        exists in the plot file directory. If one does, attempt to parse it.
-        Otherwise grab the dimensions from the hdf5 file.
-        """
         
-        if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
         self.domain_left_edge = self.__calc_left_edge()
         self.domain_right_edge = self.__calc_right_edge()
         self.domain_dimensions = self.__calc_domain_dimensions()
-        self.dimensionality = 3
+
+        if self.dimensionality == 1:
+            self._fieldinfo_fallback = Charm1DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self._fieldinfo_fallback = Charm2DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+        
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True, True, True)
-
-    def _parse_inputs_file(self, ini_filename):
-        self.fullplotdir = os.path.abspath(self.parameter_filename)
-        self.ini_filename = self._localize( \
-            self.ini_filename, ini_filename)
-        self.unique_identifier = \
-                               int(os.stat(self.parameter_filename)[ST_CTIME])
-        lines = open(self.ini_filename).readlines()
-        # read the file line by line, storing important parameters
-        for lineI, line in enumerate(lines):
-            try:
-                param, sep, vals = map(rstrip,line.partition(' '))
-            except ValueError:
-                mylog.error("ValueError: '%s'", line)
-            if charm2enzoDict.has_key(param):
-                paramName = charm2enzoDict[param]
-                t = map(parameterDict[paramName], vals.split())
-                if len(t) == 1:
-                    self.parameters[paramName] = t[0]
-                else:
-                    if paramName == "RefineBy":
-                        self.parameters[paramName] = t[0]
-                    else:
-                        self.parameters[paramName] = t
+        self.periodicity = (True,) * self.dimensionality
 
     def __calc_left_edge(self):
         fileh = self._handle
         dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        D = self.dimensionality
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
         return LE
 
     def __calc_right_edge(self):
         fileh = self._handle
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        D = self.dimensionality
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
         return RE
 
     def __calc_domain_dimensions(self):
         fileh = self._handle
-        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        D = self.dimensionality
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
         return R_index - L_index
 
     @classmethod
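
The pattern running through this changeset is to promote 1D and 2D Chombo
domains to 3D by appending unit-sized dummy dimensions. A compact sketch
of that padding convention (assumed values, not yt code):

    import numpy as np

    D = 2                                          # true dimensionality
    left  = np.array([0.0, 0.0])
    right = np.array([4.0, 4.0])
    dims  = np.array([64, 64])

    pad = 3 - D
    left  = np.concatenate((left,  [0.0] * pad))   # dummy edges run [0, 1]
    right = np.concatenate((right, [1.0] * pad))
    dims  = np.concatenate((dims,  [1] * pad))     # one cell thick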

diff -r 3a500ecfb6ac2ee49c729e65fd0f92bb96cf0529 -r 60a931c22405bb0f5ea47349cd59a28eb26293bc yt/frontends/charm/fields.py
--- a/yt/frontends/charm/fields.py
+++ b/yt/frontends/charm/fields.py
@@ -25,12 +25,12 @@
 import yt.data_objects.universal_fields
 import numpy as np
 
+CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = CharmFieldInfo.add_field
+
 KnownCharmFields = FieldInfoContainer()
 add_charm_field = KnownCharmFields.add_field
 
-CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = CharmFieldInfo.add_field
-
 add_charm_field("potential", function=NullFunc, take_log=False,
                 validators = [ValidateDataField("potential")],
                 units=r"")
@@ -39,6 +39,18 @@
                 validators = [ValidateDataField("density")],
                 units=r"")
 
+add_charm_field("gravitational_field_x", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_x")],
+                units=r"")
+
+add_charm_field("gravitational_field_y", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_y")],
+                units=r"")
+
+add_charm_field("gravitational_field_z", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_z")],
+                units=r"")
+
 def _Density(field, data):
     return data["density"]
 add_field("Density",function=_Density, take_log=True,
@@ -85,3 +97,54 @@
 add_field("ParticleMassMsun",
           function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
           particle_type=True)
+
+#do overrides for 2D
+
+Charm2DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_2d_field = Charm2DFieldInfo.add_field
+
+def _gravitational_field_z(field, data):
+    return np.zeros(data['gravitational_field_x'].shape,
+                    dtype='float64')
+add_charm_2d_field("gravitational_field_z", function=_gravitational_field_z)
+
+def _particle_position_z(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+add_charm_2d_field("particle_position_z", function=_particle_position_z)
+
+def _particle_velocity_z(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+add_charm_2d_field("particle_velocity_z", function=_particle_velocity_z)
+
+def _particle_acceleration_z(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+add_charm_2d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+#do overrides for 1D
+
+Charm1DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_1d_field = Charm1DFieldInfo.add_field
+
+def _gravitational_field_y(field, data):
+    return np.zeros(data['gravitational_field_y'].shape,
+                    dtype='float64')
+
+def _particle_position_y(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+
+def _particle_velocity_y(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+
+def _particle_acceleration_y(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+
+add_charm_1d_field("gravitational_field_z", function=_gravitational_field_z)
+add_charm_1d_field("gravitational_field_y", function=_gravitational_field_y)
+
+add_charm_1d_field("particle_position_z", function=_particle_position_z)
+add_charm_1d_field("particle_velocity_z", function=_particle_velocity_z)
+add_charm_1d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+add_charm_1d_field("particle_position_y", function=_particle_position_y)
+add_charm_1d_field("particle_velocity_y", function=_particle_velocity_y)
+add_charm_1d_field("particle_acceleration_y", function=_particle_acceleration_y)
\ No newline at end of file

diff -r 3a500ecfb6ac2ee49c729e65fd0f92bb96cf0529 -r 60a931c22405bb0f5ea47349cd59a28eb26293bc yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ b/yt/frontends/charm/io.py
@@ -87,3 +87,66 @@
     
         data = self._handle['level_0/particles:data'][offsets[grid.id]:offsets[grid.id + 1]]
         return data[field_index::items_per_particle]
+
+class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm2d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+
+    def _read_particles(self, grid, name):
+
+        particle_field_index = {'position_x': 0,
+                                'position_y': 1,
+                                'velocity_x': 2,
+                                'velocity_y': 3,
+                                'acceleration_x': 4,
+                                'acceleration_y': 5,
+                                'mass': 6}
+
+        field_index = particle_field_index[name]
+
+        particles_per_cell = self._handle['level_0/particles:offsets'].value
+        items_per_particle = len(particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_cell)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+           
+        data = self._handle['level_0/particles:data'][offsets[grid.id]:offsets[grid.id + 1]]
+        return data[field_index::items_per_particle]
+
+class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm1d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+
+    def _read_particles(self, grid, name):
+
+        particle_field_index = {'position_x': 0,
+                                'velocity_x': 1,
+                                'acceleration_x': 2,
+                                'mass': 3}
+
+        field_index = particle_field_index[name]
+
+        particles_per_cell = self._handle['level_0/particles:offsets'].value
+        items_per_particle = len(particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_cell)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+           
+        data = self._handle['level_0/particles:data'][offsets[grid.id]:offsets[grid.id + 1]]
+        return data[field_index::items_per_particle]
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/6dcaaa51c03a/
Changeset:   6dcaaa51c03a
Branch:      yt
User:        atmyers
Date:        2014-03-18 01:16:04
Summary:     Merged yt_analysis/yt into yt
Affected #:  7 files

diff -r 60a931c22405bb0f5ea47349cd59a28eb26293bc -r 6dcaaa51c03aa06e3089655b7bf2bef9848e3d7a yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -4,10 +4,9 @@
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
 
-
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
-        minError=1E-5, complexLim=.999,
-        fitLim=.99, minLength=3, 
+        minError=1E-4, complexLim=.995,
+        fitLim=.97, minLength=3, 
         maxLength=1000, splitLim=.99,
         output_file=None):
 
@@ -90,6 +89,7 @@
     fluxData[0]=1
     fluxData[-1]=1
 
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -111,6 +111,7 @@
             yDatBounded=fluxData[b[1]:b[2]]
             yFitBounded=yFit[b[1]:b[2]]
 
+
             #Find init redshift
             z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
 
@@ -121,24 +122,33 @@
 
             #Fit Using complex tools
             newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
-                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+                    z,fitLim,minError,speciesDict)
+
+            #If flagged as a bad fit, species is lyman alpha,
+            #   and it may be a saturated line, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+               newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        minSize,minError)
+
+            if na.size(newLinesP)> 0:
+
+                #Check for numerical instabilities
+                newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
+
 
             #Check existence of partner lines if applicable
             if len(speciesDict['wavelength']) != 1:
                 newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                        b, minError*(b[2]-b[1]),
-                        x0, xRes, speciesDict)
+                        b, minError, x0, xRes, speciesDict)
 
-            #If flagged as a bad fit, species is lyman alpha,
-            #   and it may be a saturated line, use special tools
-            if flag and species=='lya' and min(yDatBounded)<.1:
-                newLinesP=_large_flag_fit(xBounded,yDatBounded,
-                        yFitBounded,z,speciesDict,
-                        minSize,minError*(b[2]-b[1]))
+
+
 
             #Adjust total current fit
             yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
 
+
             #Add new group to all fitted lines
             if na.size(newLinesP)>0:
                 speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
@@ -149,6 +159,7 @@
 
         allSpeciesLines[species]=speciesLines
 
+
     if output_file:
         _output_fit(allSpeciesLines, output_file)
 
@@ -205,10 +216,12 @@
     #Setup initial line guesses
     if initP==None: #Regular fit
         initP = [0,0,0] 
-        if min(yDat)<.5: #Large lines get larger initial guess 
-            initP[0] = 10**16
+        if min(yDat)<.01: #Large lines get larger initial guess 
+            initP[0] = speciesDict['init_N']*10**2
+        elif min(yDat)<.5:
+            initP[0] = speciesDict['init_N']*10**1
         elif min(yDat)>.9: #Small lines get smaller initial guess
-            initP[0] = 10**12.5
+            initP[0] = speciesDict['init_N']*10**-1
         else:
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
@@ -225,9 +238,16 @@
         return [],False
     
     #Values to proceed through first run
-    errSq,prevErrSq=1,1000
+    errSq,prevErrSq,prevLinesP=1,10*len(x),[]
 
+    if errBound == None:
+        errBound = len(yDat)*(max(1-yDat)*1E-2)**2
+    else:
+        errBound = errBound*len(yDat)
+
+    flag = False
     while True:
+
         #Initial parameter guess from joining parameters from all lines
         #   in lines into a single array
         initP = linesP.flatten()
@@ -237,6 +257,7 @@
                 args=(x,yDat,yFit,speciesDict),
                 epsfcn=1E-10,maxfev=1000)
 
+
         #Set results of optimization
         linesP = na.reshape(fitP,(-1,3))
 
@@ -247,17 +268,23 @@
         #Sum to get idea of goodness of fit
         errSq=sum(dif**2)
 
+        if any(linesP[:,1]==speciesDict['init_b']):
+         #   linesP = prevLinesP
+
+            flag = True
+            break
+            
         #If good enough, break
-        if errSq < errBound: 
+        if errSq < errBound:        
             break
 
         #If last fit was worse, reject the last line and revert to last fit
-        if errSq > prevErrSq*10:
+        if errSq > prevErrSq*10 :
             #If its still pretty damn bad, cut losses and try flag fit tools
             if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                linesP = prevLinesP
                 break
 
         #If too many lines 
@@ -266,21 +293,26 @@
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                break 
+                flag = True
+                break
 
         #Store previous data in case reject next fit
         prevErrSq = errSq
         prevLinesP = linesP
 
-
         #Set up initial condition for new line
         newP = [0,0,0] 
-        if min(dif)<.1:
-            newP[0]=10**12
-        elif min(dif)>.9:
-            newP[0]=10**16
+
+        yAdjusted = 1+yFit*yNewFit-yDat
+ 
+        if min(yAdjusted)<.01: #Large lines get larger initial guess 
+            newP[0] = speciesDict['init_N']*10**2
+        elif min(yAdjusted)<.5:
+            newP[0] = speciesDict['init_N']*10**1
+        elif min(yAdjusted)>.9: #Small lines get smaller initial guess
+            newP[0] = speciesDict['init_N']*10**-1
         else:
-            newP[0]=10**14
+            newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
         linesP=na.append(linesP,[newP],axis=0)
@@ -290,12 +322,12 @@
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict)
+        check=_check_params(na.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
     linesP = na.delete(linesP,remove,axis=0)
 
-    return linesP,False
+    return linesP,flag
 
 def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
     """
@@ -489,6 +521,9 @@
     #List of lines to remove
     removeLines=[]
 
+    #Set error
+
+
     #Iterate through all sets of line parameters
     for i,p in enumerate(linesP):
 
@@ -501,16 +536,23 @@
             lb = _get_bounds(p[2],b,wl,x0,xRes)
             xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
 
+            if errBound == None:
+                errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
+            else:
+                errBound = 10*errBound*len(yb)
+
             #Generate a fit and find the difference to data
             yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
             dif =yb-yFitb
 
+
+
             #Only counts as an error if line is too big ---------------<
             dif = [k for k in dif if k>0]
             err = sum(dif)
 
             #If the fit is too bad then add the line to list of removed lines
-            if err > errBound*1E2:
+            if err > errBound:
                 removeLines.append(i)
                 break
 
@@ -640,21 +682,13 @@
         #Check if the region needs to be divided
         if b[2]-b[1]>maxLength:
 
-            #Find the minimum absorption in the middle two quartiles of
-            #   the large complex
-            q=(b[2]-b[1])/4
-            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+            split = _split_region(yDat,b,splitLim)
 
-            #Only break it up if the minimum absorption is actually low enough
-            if yDat[cut]>splitLim:
-
-                #Get the new two peaks
-                b1Peak = yDat[b[1]:cut].argmin()+b[1]
-                b2Peak = yDat[cut:b[2]].argmin()+cut
+            if split:
 
                 #add the two regions separately
-                cBounds.insert(i+1,[b1Peak,b[1],cut])
-                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+                cBounds.insert(i+1,split[0])
+                cBounds.insert(i+2,split[1])
 
                 #Remove the original region
                 cBounds.pop(i)
@@ -663,7 +697,33 @@
 
     return cBounds
 
-def _gen_flux_lines(x, linesP, speciesDict):
+
+def _split_region(yDat,b,splitLim):
+    #Find the minimum absorption in the middle two quartiles of
+    #   the large complex
+
+    q=(b[2]-b[1])/4
+    cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+    #Only break it up if the minimum absorption is actually low enough
+    if yDat[cut]>splitLim:
+
+        #Get the new two peaks
+        b1Peak = yDat[b[1]:cut].argmin()+b[1]
+        b2Peak = yDat[cut:b[2]].argmin()+cut
+
+        region_1 = [b1Peak,b[1],cut]
+        region_2 = [b2Peak,cut,b[2]]
+
+        return [region_1,region_2]
+
+    else:
+
+        return []
+
+
+
+def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
     """
     Calculates the normalized flux for a region of wavelength space
     generated by a set of absorption lines.
@@ -692,6 +752,9 @@
             g=speciesDict['Gamma'][i]
             wl=speciesDict['wavelength'][i]
             y = y+ _gen_tau(x,p,f,g,wl)
+            if firstLine: 
+                break
+
     flux = na.exp(-y)
     return flux
 
@@ -744,21 +807,25 @@
         the difference between the fit generated by the parameters
         given in pTotal multiplied by the previous fit and the desired
         flux profile, w/ first index modified appropriately for bad 
-        parameter choices
+        parameter choices and additional penalty for fitting with a lower
+        flux than observed.
     """
 
     pTotal.shape = (-1,3)
     yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
 
     error = yDat-yFit*yNewFit
-    error[0] = _check_params(pTotal,speciesDict)
+    error_plus = (yDat-yFit*yNewFit).clip(min=0)
+
+    error = error+error_plus
+    error[0] = _check_params(pTotal,speciesDict,x)
 
     return error
 
-def _check_params(p, speciesDict):
+def _check_params(p, speciesDict,xb):
     """
     Check to see if any of the parameters in p fall outside the range 
-        given in speciesDict.
+        given in speciesDict or on the boundaries
 
     Parameters
     ----------
@@ -767,6 +834,8 @@
     speciesDict : dictionary
         dictionary with properties giving the max and min
         values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
 
     Returns
     -------
@@ -774,16 +843,137 @@
         0 if all values are fine
         999 if any values fall outside acceptable range
     """
+
+    minz = (xb[0])/speciesDict['wavelength'][0]-1
+    maxz = (xb[-1])/speciesDict['wavelength'][0]-1
+
     check = 0
-    if any(p[:,0] > speciesDict['maxN']) or\
-          any(p[:,0] < speciesDict['minN']) or\
-          any(p[:,1] > speciesDict['maxb']) or\
-          any(p[:,1] < speciesDict['minb']) or\
-          any(p[:,2] > speciesDict['maxz']) or\
-          any(p[:,2] < speciesDict['minz']):
+    if any(p[:,0] >= speciesDict['maxN']) or\
+          any(p[:,0] <= speciesDict['minN']) or\
+          any(p[:,1] >= speciesDict['maxb']) or\
+          any(p[:,1] <= speciesDict['minb']) or\
+          any(p[:,2] >= maxz) or\
+          any(p[:,2] <= minz):
               check = 999
+              
     return check
 
+def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
+
+    """
+    Check to see if any of the parameters in p are the
+    same as the initial parameters and, if so, attempt to
+    split the region and refit it.
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    """
+
+    # Check if anything is a default parameter
+    if any(p[:,0] == speciesDict['init_N']) or\
+          any(p[:,0] == speciesDict['init_N']*10) or\
+          any(p[:,0] == speciesDict['init_N']*100) or\
+          any(p[:,0] == speciesDict['init_N']*.1) or\
+          any(p[:,1] == speciesDict['init_b']) or\
+          any(p[:,1] == speciesDict['maxb']):
+
+            # These are the initial bounds
+            init_bounds = [yDat.argmin(),0,len(xb)-1]
+
+            # Gratuitous limit for splitting the region
+            newSplitLim = 1 - (1-min(yDat))*.5
+
+            # Attempt to split region
+            split = _split_region(yDat,init_bounds,newSplitLim)
+            
+            # If we can't split it, just reject it. It's unphysical
+            # to just keep the default parameters and we're out of
+            # options at this point
+            if not split:
+                return []
+
+            # Else set up the bounds for each region and fit separately
+            b1,b2 = split[0][2], split[1][1]
+
+            p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
+                            initz, minSize, errorBound, speciesDict)
+
+            p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
+                            initz, minSize, errorBound, speciesDict)
+
+            # Make the final line parameters. It's annoying because
+            # one or both regions may have fit to nothing
+            if na.size(p1)> 0 and na.size(p2)>0:
+                p = na.r_[p1,p2]
+            elif na.size(p1) > 0:
+                p = p1
+            else:
+                p = p2
+
+    return p
+
+
+def _check_numerical_instability(x, p, speciesDict,b):
+
+    """
+    Check to see if any of the parameters in p are causing
+    unstable numerical effects outside the region of fit
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    b : (3) list
+        list of integers indicating bounds of region fit in x
+    """
+
+    remove_lines = []
+
+
+    for i,line in enumerate(p):
+
+        # First, check whether the line is at risk of instability
+        if line[1]<5 or line[0] < 1E12:
+
+
+            # get all flux that isn't part of fit plus a little wiggle room
+            # max and min to prevent boundary errors
+
+            flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
+            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+
+            #Find regions that are absorbing outside the region we fit
+            flux_dif = 1 - flux
+            absorbing_coefficient = max(abs(flux_dif))
+
+
+            #Really there shouldn't be any absorption outside
+            #the region we fit, but we'll give some leeway.
+            #for high resolution spectra the tiny bits on the edges
+            #can give a non negligible amount of flux. Plus the errors
+            #we are looking for are HUGE.
+            if absorbing_coefficient > .1:
+
+                # we just set it to no fit because we've tried
+                # everything else at this point; this region cannot be fit reliably
+                remove_lines.append(i)
+    
+    if remove_lines:
+        p = na.delete(p, remove_lines, axis=0)
+
+    return p
 
 def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
     """
@@ -815,4 +1005,5 @@
         f.create_dataset("{0}/z".format(ion),data=params['z'])
         f.create_dataset("{0}/complex".format(ion),data=params['group#'])
     print 'Writing spectrum fit to {0}'.format(file_name)
+    f.close()
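
Much of this diff centers on _split_region, which breaks an overly long
absorbing complex at its point of weakest absorption. A toy version of
that logic (assumptions: yDat is normalized flux, b = [peak, left, right]
indices, splitLim near 1; not the yt code itself):

    import numpy as np

    def split_region(yDat, b, splitLim):
        # look for the flux maximum (weakest absorption) in the middle
        # two quartiles of the complex
        q = (b[2] - b[1]) // 4
        cut = yDat[b[1] + q:b[2] - q].argmax() + b[1] + q
        if yDat[cut] > splitLim:   # only split if absorption there is weak
            b1 = yDat[b[1]:cut].argmin() + b[1]
            b2 = yDat[cut:b[2]].argmin() + cut
            return [[b1, b[1], cut], [b2, cut, b[2]]]
        return []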
 

diff -r 60a931c22405bb0f5ea47349cd59a28eb26293bc -r 6dcaaa51c03aa06e3089655b7bf2bef9848e3d7a yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -15,9 +15,18 @@
 import os
 from yt.funcs import *
 import h5py
+
+try:
+    import xspec
+except ImportError:
+    pass
+
 try:
     import astropy.io.fits as pyfits
-    import xspec
+except ImportError:
+    pass
+
+try:
     from scipy.integrate import cumtrapz
     from scipy import stats        
 except ImportError:
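
Splitting the imports into separate try blocks means a missing xspec no
longer masks astropy or scipy. A common extension of this pattern records
availability so later calls can fail with a clear message (a sketch, not
the committed code):

    try:
        import xspec
        _xspec_available = True
    except ImportError:
        _xspec_available = False

    def require_xspec():
        # call at the top of any entry point that actually needs xspec
        if not _xspec_available:
            raise RuntimeError("the optional 'xspec' package is not installed")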

diff -r 60a931c22405bb0f5ea47349cd59a28eb26293bc -r 6dcaaa51c03aa06e3089655b7bf2bef9848e3d7a yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -355,7 +355,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter],
+                           data["particle_mass"][filter].astype(np.float64),
                            np.int64(np.where(filter)[0].size),
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -429,7 +429,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
                            data["particle_position_y"].astype(np.float64),
                            data["particle_position_z"].astype(np.float64),
-                           particle_field_data,
+                           particle_field_data.astype(np.float64),
                            data["particle_position_x"].size,
                            top, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -440,7 +440,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
                            data["particle_position_y"].astype(np.float64),
                            data["particle_position_z"].astype(np.float64),
-                           data["particle_mass"],
+                           data["particle_mass"].astype(np.float64),
                            data["particle_position_x"].size,
                            bottom, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -470,7 +470,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                           data["particle_position_y"][filter].astype(np.float64),
                           data["particle_position_z"][filter].astype(np.float64),
-                          particle_field_data,
+                          particle_field_data.astype(np.float64),
                           np.int64(np.where(filter)[0].size),
                           top, np.array(data.LeftEdge).astype(np.float64),
                           np.array(data.ActiveDimensions).astype(np.int32), 
@@ -481,7 +481,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                           data["particle_position_y"][filter].astype(np.float64),
                           data["particle_position_z"][filter].astype(np.float64),
-                          data["particle_mass"][filter],
+                          data["particle_mass"][filter].astype(np.float64),
                           np.int64(np.where(filter)[0].size),
                           bottom, np.array(data.LeftEdge).astype(np.float64),
                           np.array(data.ActiveDimensions).astype(np.int32), 
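
The astype(np.float64) casts above guard a typed Cython buffer: a routine
declared for float64 arrays will not accept float32 input, so any particle
field that happens to arrive in single precision must be promoted first.
Illustrative only:

    import numpy as np

    particle_mass = np.ones(8, dtype=np.float32)   # e.g. as read from disk
    buf = particle_mass.astype(np.float64)         # promote before the C call
    assert buf.dtype == np.float64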

diff -r 60a931c22405bb0f5ea47349cd59a28eb26293bc -r 6dcaaa51c03aa06e3089655b7bf2bef9848e3d7a yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -422,7 +422,7 @@
         self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         if self.dimensionality < 3:
-            for d in (dimensionality)+range(3-dimensionality):
+            for d in [dimensionality]+range(3-dimensionality):
                 if self.domain_left_edge[d] == self.domain_right_edge[d]:
                     mylog.warning('Identical domain left edge and right edges '
                                   'along dummy dimension (%i), attempting to read anyway' % d)
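
The one-character fix above is the classic tuple/parenthesis gotcha:
(dimensionality) is just an int, so concatenating it with range()'s list
raised a TypeError, while [dimensionality] builds the intended
one-element list.

    d = 2
    assert (d) == 2                             # parentheses alone: still an int
    assert [d] + list(range(3 - d)) == [2, 0]   # list concatenation works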

diff -r 60a931c22405bb0f5ea47349cd59a28eb26293bc -r 6dcaaa51c03aa06e3089655b7bf2bef9848e3d7a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -885,6 +885,7 @@
          If set to 'c', 'center' or left blank, the plot is centered on the
          middle of the domain. If set to 'max' or 'm', the center will be at 
          the point of highest density.
+    width : tuple or float
          Width can have four different formats to support windows with variable
          x and y widths.  They are:
 


https://bitbucket.org/yt_analysis/yt/commits/746098ff0df5/
Changeset:   746098ff0df5
Branch:      yt
User:        atmyers
Date:        2014-03-18 22:48:48
Summary:     Merged yt_analysis/yt into yt
Affected #:  3 files

diff -r 6dcaaa51c03aa06e3089655b7bf2bef9848e3d7a -r 746098ff0df5ec9db51ac03dd38ba035adcc7a1b yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -17,6 +17,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.utilities.exceptions import YTNotInsideNotebook
 import _colormap_data as cmd
 import yt.utilities.lib as au
 import __builtin__

diff -r 6dcaaa51c03aa06e3089655b7bf2bef9848e3d7a -r 746098ff0df5ec9db51ac03dd38ba035adcc7a1b yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 
 
+import __builtin__
 import base64
 import types
 
@@ -26,6 +27,7 @@
 
 from matplotlib.font_manager import FontProperties
 
+from ._mpl_imports import FigureCanvasAgg
 from .plot_window import WindowPlotMPL
 from .base_plot_types import ImagePlotMPL
 from .plot_container import \
@@ -35,6 +37,8 @@
     write_image, apply_colormap
 from yt.data_objects.profiles import \
      create_profile
+from yt.utilities.exceptions import \
+     YTNotInsideNotebook
 from yt.utilities.lib import \
     write_png_to_string
 from yt.data_objects.profiles import \
@@ -44,7 +48,8 @@
 import _mpl_imports as mpl
 from yt.funcs import \
      ensure_list, \
-     get_image_suffix
+     get_image_suffix, \
+     get_ipython_api_version
 
 def get_canvas(name):
     suffix = get_image_suffix(name)

diff -r 6dcaaa51c03aa06e3089655b7bf2bef9848e3d7a -r 746098ff0df5ec9db51ac03dd38ba035adcc7a1b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -18,6 +18,7 @@
 
 from yt.funcs import *
 from yt.utilities.math_utils import *
+from yt.utilities.exceptions import YTNotInsideNotebook
 from copy import deepcopy
 
 from .grid_partitioner import HomogenizedVolume


https://bitbucket.org/yt_analysis/yt/commits/cfd059b50099/
Changeset:   cfd059b50099
Branch:      yt
User:        atmyers
Date:        2014-03-23 20:22:32
Summary:     Merged yt_analysis/yt into yt
Affected #:  1 file

diff -r 746098ff0df5ec9db51ac03dd38ba035adcc7a1b -r cfd059b5009999f8ecd53b73655a53f92c9d3b49 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -7,6 +7,7 @@
 from functools import wraps
 from matplotlib.font_manager import FontProperties
 
+from ._mpl_imports import FigureCanvasAgg
 from .tick_locators import LogLocator, LinearLocator
 from .color_maps import yt_colormaps, is_colormap
 from .plot_modifications import \


https://bitbucket.org/yt_analysis/yt/commits/aded42c36c7e/
Changeset:   aded42c36c7e
Branch:      yt
User:        atmyers
Date:        2014-03-30 23:46:01
Summary:     patching chombo particle reader to work with AMR
Affected #:  2 files

diff -r cfd059b5009999f8ecd53b73655a53f92c9d3b49 -r aded42c36c7ee638a2d8a99b2873419f30c415d1 yt/frontends/charm/data_structures.py
--- a/yt/frontends/charm/data_structures.py
+++ b/yt/frontends/charm/data_structures.py
@@ -90,6 +90,7 @@
 class CharmHierarchy(AMRHierarchy):
 
     grid = CharmGrid
+    _data_file = None
 
     def __init__(self,pf,data_style='charm_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
@@ -109,7 +110,7 @@
         self.directory = pf.fullpath
         self._handle = pf._handle
 
-        self.float_type = self._handle['/level_0']['data:datatype=0'].dtype.name
+        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
         self._levels = [key for key in self._handle.keys() if key.startswith('level')]
         AMRHierarchy.__init__(self,pf,data_style)
         self._read_particles()
@@ -117,11 +118,13 @@
     def _read_particles(self):
         
         self.num_particles = 0
+        particles_per_grid = []
         for key, val in self._handle.items():
             if key.startswith('level'):
-                self.num_particles += val['particles:offsets'][:].sum()
+                level_particles = val['particles:offsets'][:]
+                self.num_particles += level_particles.sum()
+                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
 
-        particles_per_grid = self._handle["level_0/particles:offsets"]
         for i, grid in enumerate(self.grids):
             self.grids[i].NumberOfParticles = particles_per_grid[i]
             self.grid_particle_count[i] = particles_per_grid[i]
@@ -153,7 +156,10 @@
         D = self.parameter_file.dimensionality
         for lev_index, lev in enumerate(self._levels):
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            boxes = f[lev]['boxes'].value
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
             dx = f[lev].attrs['dx']
             self.dds_list.append(dx * np.ones(3))
 

diff -r cfd059b5009999f8ecd53b73655a53f92c9d3b49 -r aded42c36c7ee638a2d8a99b2873419f30c415d1 yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ b/yt/frontends/charm/io.py
@@ -75,17 +75,24 @@
                                 'mass': 9}
 
         field_index = particle_field_index[name]
+        lev = 'level_%s' % grid.Level
 
-        particles_per_cell = self._handle['level_0/particles:offsets'].value
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
         items_per_particle = len(particle_field_index)
 
         # compute global offset position
-        offsets = items_per_particle * np.cumsum(particles_per_cell)
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
         offsets = np.append(np.array([0]), offsets)
         offsets = np.array(offsets, dtype=np.int64)
-        
-    
-        data = self._handle['level_0/particles:data'][offsets[grid.id]:offsets[grid.id + 1]]
+
+        # convert between the global grid id and the id on this level            
+        grid_levels = np.array([g.Level for g in self.pf.h.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
         return data[field_index::items_per_particle]
 
 class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
@@ -109,16 +116,23 @@
                                 'mass': 6}
 
         field_index = particle_field_index[name]
+        lev = 'level_%s' % grid.Level
 
-        particles_per_cell = self._handle['level_0/particles:offsets'].value
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
         items_per_particle = len(particle_field_index)
 
         # compute global offset position
-        offsets = items_per_particle * np.cumsum(particles_per_cell)
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
         offsets = np.append(np.array([0]), offsets)
         offsets = np.array(offsets, dtype=np.int64)
-           
-        data = self._handle['level_0/particles:data'][offsets[grid.id]:offsets[grid.id + 1]]
+            
+        grid_levels = np.array([g.Level for g in self.pf.h.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
         return data[field_index::items_per_particle]
 
 class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
@@ -139,14 +153,21 @@
                                 'mass': 3}
 
         field_index = particle_field_index[name]
+        lev = 'level_%s' % grid.Level
 
-        particles_per_cell = self._handle['level_0/particles:offsets'].value
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
         items_per_particle = len(particle_field_index)
 
         # compute global offset position
-        offsets = items_per_particle * np.cumsum(particles_per_cell)
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
         offsets = np.append(np.array([0]), offsets)
         offsets = np.array(offsets, dtype=np.int64)
-           
-        data = self._handle['level_0/particles:data'][offsets[grid.id]:offsets[grid.id + 1]]
+            
+        grid_levels = np.array([g.Level for g in self.pf.h.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
         return data[field_index::items_per_particle]
\ No newline at end of file
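
The recurring block in this patch translates a global grid id into an
index local to that grid's level, since each level's 'particles:offsets'
array is indexed from zero. A toy version (assumed level-major grid
ordering, not yt code):

    import numpy as np

    grid_levels = np.array([0, 0, 1, 1, 1])    # per-grid AMR level
    grid_ids    = np.arange(len(grid_levels))  # global ids, level-major order

    def level_local_index(gid):
        level = grid_levels[gid]
        first_on_level = grid_ids[np.where(grid_levels == level)[0][0]]
        return gid - first_on_level

    assert level_local_index(3) == 1           # second grid on level 1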


https://bitbucket.org/yt_analysis/yt/commits/bbcdfb8a3f0f/
Changeset:   bbcdfb8a3f0f
Branch:      yt
User:        atmyers
Date:        2014-03-30 23:52:50
Summary:     getting a little more code reuse
Affected #:  1 file

diff -r aded42c36c7ee638a2d8a99b2873419f30c415d1 -r bbcdfb8a3f0f2851de3fae12cc79f680d56beb3f yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ b/yt/frontends/charm/io.py
@@ -29,6 +29,16 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'position_z': 2,
+                                      'velocity_x': 3,
+                                      'velocity_y': 4,
+                                      'velocity_z': 5,
+                                      'acceleration_x': 6,
+                                      'acceleration_y': 7,
+                                      'acceleration_z': 8,
+                                      'mass': 9}
 
     _field_dict = None
     @property
@@ -63,22 +73,11 @@
 
     def _read_particles(self, grid, name):
 
-        particle_field_index = {'position_x': 0,
-                                'position_y': 1,
-                                'position_z': 2,
-                                'velocity_x': 3,
-                                'velocity_y': 4,
-                                'velocity_z': 5,
-                                'acceleration_x': 6,
-                                'acceleration_y': 7,
-                                'acceleration_z': 8,
-                                'mass': 9}
-
-        field_index = particle_field_index[name]
+        field_index = self._particle_field_index[name]
         lev = 'level_%s' % grid.Level
 
         particles_per_grid = self._handle[lev]['particles:offsets'].value
-        items_per_particle = len(particle_field_index)
+        items_per_particle = len(self._particle_field_index)
 
         # compute global offset position
         offsets = items_per_particle * np.cumsum(particles_per_grid)
@@ -104,36 +103,14 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'velocity_x': 2,
+                                      'velocity_y': 3,
+                                      'acceleration_x': 4,
+                                      'acceleration_y': 5,
+                                      'mass': 6}
 
-    def _read_particles(self, grid, name):
-
-        particle_field_index = {'position_x': 0,
-                                'position_y': 1,
-                                'velocity_x': 2,
-                                'velocity_y': 3,
-                                'acceleration_x': 4,
-                                'acceleration_y': 5,
-                                'mass': 6}
-
-        field_index = particle_field_index[name]
-        lev = 'level_%s' % grid.Level
-
-        particles_per_grid = self._handle[lev]['particles:offsets'].value
-        items_per_particle = len(particle_field_index)
-
-        # compute global offset position
-        offsets = items_per_particle * np.cumsum(particles_per_grid)
-        offsets = np.append(np.array([0]), offsets)
-        offsets = np.array(offsets, dtype=np.int64)
-            
-        grid_levels = np.array([g.Level for g in self.pf.h.grids])
-        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
-        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
-        lo = grid.id - grid_level_offset
-        hi = lo + 1
-
-        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-        return data[field_index::items_per_particle]
 
 class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
     _data_style = "charm1d_hdf5"
@@ -144,30 +121,7 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
-
-    def _read_particles(self, grid, name):
-
-        particle_field_index = {'position_x': 0,
-                                'velocity_x': 1,
-                                'acceleration_x': 2,
-                                'mass': 3}
-
-        field_index = particle_field_index[name]
-        lev = 'level_%s' % grid.Level
-
-        particles_per_grid = self._handle[lev]['particles:offsets'].value
-        items_per_particle = len(particle_field_index)
-
-        # compute global offset position
-        offsets = items_per_particle * np.cumsum(particles_per_grid)
-        offsets = np.append(np.array([0]), offsets)
-        offsets = np.array(offsets, dtype=np.int64)
-            
-        grid_levels = np.array([g.Level for g in self.pf.h.grids])
-        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
-        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
-        lo = grid.id - grid_level_offset
-        hi = lo + 1
-
-        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-        return data[field_index::items_per_particle]
\ No newline at end of file
+        self._particle_field_index = {'position_x': 0,
+                                      'velocity_x': 1,
+                                      'acceleration_x': 2,
+                                      'mass': 3}
\ No newline at end of file

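The refactor leaves a single _read_particles implementation on IOHandlerCharmHDF5 and has each dimensionality-specific subclass supply only its _particle_field_index; both the field lookup and the per-particle stride are derived from that one dict, so the 2D and 1D handlers no longer need method overrides. The pattern in miniature (names invented for illustration):

    class BaseReader(object):
        def read(self, name):
            # both the index and the stride come from the subclass's dict
            return self._index[name], len(self._index)

    class Reader1D(BaseReader):
        def __init__(self):
            self._index = {'position_x': 0, 'velocity_x': 1, 'mass': 2}

    print(Reader1D().read('mass'))   # (2, 3)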

https://bitbucket.org/yt_analysis/yt/commits/af74e7e1bf6f/
Changeset:   af74e7e1bf6f
Branch:      yt
User:        atmyers
Date:        2014-03-31 21:58:31
Summary:     Merged yt_analysis/yt into yt
Affected #:  1 file

diff -r bbcdfb8a3f0f2851de3fae12cc79f680d56beb3f -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -811,6 +811,7 @@
         self.pf = data_source.pf
         self.field_data = YTFieldData()
         self.weight_field = weight_field
+        ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
 
     def add_fields(self, fields):
         fields = ensure_list(fields)
@@ -822,7 +823,9 @@
     def _finalize_storage(self, fields, temp_storage):
         # We use our main comm here
         # This also will fill _field_data
-        # FIXME: Add parallelism and combining std stuff
+        temp_storage.values = self.comm.mpi_allreduce(temp_storage.values, op="sum", dtype="float64")
+        temp_storage.weight_values = self.comm.mpi_allreduce(temp_storage.weight_values, op="sum", dtype="float64")
+        temp_storage.used = self.comm.mpi_allreduce(temp_storage.used, op="sum", dtype="bool")
         if self.weight_field is not None:
             temp_storage.values /= temp_storage.weight_values[...,None]
         blank = ~temp_storage.used

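With the interface initialized on the data source's communicator, each rank bins only its own chunks, and the partial results are combined with an all-reduce before the weighted mean is taken; summing the boolean used mask marks a bin as populated if any rank filled it. Roughly the same reduction expressed directly in mpi4py (not yt's wrapper API):

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    local_sums = np.random.rand(8)          # stand-in for this rank's binned sums
    global_sums = np.empty_like(local_sums)
    comm.Allreduce(local_sums, global_sums, op=MPI.SUM)
    # global_sums now holds the elementwise sum over all ranks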

https://bitbucket.org/yt_analysis/yt/commits/5112b4710de5/
Changeset:   5112b4710de5
Branch:      yt
User:        atmyers
Date:        2014-05-05 19:00:11
Summary:     Merged yt_analysis/yt into yt
Affected #:  11 files

diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -396,8 +396,9 @@
                   vector_length(my_segment['start'], my_segment['end']) * \
                   self.cosmology.HubbleConstantNow * \
                   self.cosmology.ExpansionFactor(my_segment['redshift'])
-                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
-                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+                next_redshift = my_segment["redshift"] + \
+                  np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                          (1. - h_vel / speed_of_light_cgs)) + 1.
             elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:

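For reference, the relativistic Doppler relation being drawn on here is 1 + z = sqrt((1 + v/c) / (1 - v/c)), i.e. z = sqrt((1 + v/c) / (1 - v/c)) - 1, so the trailing "+ 1." in the new expression looks like a sign slip. The segment-stitching logic is rewritten again in changeset 2ec77aca4ee8 below, which avoids the explicit formula in favor of _deltaz_forward.
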
diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -41,7 +41,7 @@
     star_creation_time : Ordered array or list of floats
         The creation time for the stars in code units.
     volume : Float
-        The volume of the region for the specified list of stars.
+        The comoving volume of the region for the specified list of stars.
     bins : Integer
         The number of time bins used for binning the stars. Default = 300.
     
@@ -69,7 +69,7 @@
                If data_source is not provided, all of these parameters need to be set:
                 star_mass (array, Msun),
                 star_creation_time (array, code units),
-                volume (float, Mpc**3).
+                volume (float, cMpc**3).
                 """)
                 return None
             self.mode = 'provided'
@@ -125,14 +125,14 @@
         """
         if self.mode == 'data_source':
             try:
-                vol = self._data_source.volume('mpc')
+                vol = self._data_source.volume('mpccm')
             except AttributeError:
                 # If we're here, this is probably a HOPHalo object, and we
                 # can get the volume this way.
                 ds = self._data_source.get_sphere()
-                vol = ds.volume('mpc')
+                vol = ds.volume('mpccm')
         elif self.mode == 'provided':
-            vol = self.volume
+            vol = self.volume  # user-provided volumes are already comoving (cMpc**3)
         tc = self._pf["Time"]
         self.time = []
         self.lookback_time = []
@@ -148,6 +148,7 @@
             self.redshift.append(self.cosm.ComputeRedshiftFromTime(time * tc))
             self.Msol_yr.append(self.mass_bins[i] / \
                 (self.time_bins_dt[i] * tc / YEAR))
+            # vol is now comoving (mpccm), matching the convention used in the literature
             self.Msol_yr_vol.append(self.mass_bins[i] / \
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])

diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -215,8 +215,12 @@
         if fields == None: fields = []
         self.fields = ensure_list(fields)[:]
         self.field_data = YTFieldData()
+        self._default_field_parameters = {}
+        self._default_field_parameters["center"] = np.zeros(3, dtype='float64')
+        self._default_field_parameters["bulk_velocity"] = np.zeros(3, dtype='float64')
+        self._default_field_parameters["normal"] = np.array([0,0,1], dtype='float64')
         self.field_parameters = {}
-        self.__set_default_field_parameters()
+        self._set_default_field_parameters()
         self._cut_masks = {}
         self._point_indices = {}
         self._vc_data = {}
@@ -224,10 +228,13 @@
             mylog.debug("Setting %s to %s", key, val)
             self.set_field_parameter(key, val)
 
-    def __set_default_field_parameters(self):
-        self.set_field_parameter("center",np.zeros(3,dtype='float64'))
-        self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
-        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
+    def _set_default_field_parameters(self):
+        for k,v in self._default_field_parameters.items():
+            self.set_field_parameter(k,v)
+
+    def _is_default_field_parameter(self, parameter):
+        if parameter not in self._default_field_parameters: return False
+        return self._default_field_parameters[parameter] is self.field_parameters[parameter]
 
     def _set_center(self, center):
         if center is None:
@@ -783,7 +790,6 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1)
         pids = np.where(points_in_grid)[0]
@@ -1772,8 +1778,10 @@
             self._distributed = False
             self._okay_to_serialize = False
             self._check_region = True
+            # Use the data_source's field parameters if they don't exist in the
+            # object or if they are the default values
             for k, v in source.field_parameters.items():
-                if k not in self.field_parameters:
+                if k not in self.field_parameters or self._is_default_field_parameter(k):
                     self.set_field_parameter(k,v)
         self.source = source
         if self._field_cuts is not None:
@@ -2115,7 +2123,7 @@
             self._okay_to_serialize = False
             self._check_region = True
             for k, v in source.field_parameters.items():
-                if k not in self.field_parameters:
+                if k not in self.field_parameters or self._is_default_field_parameter(k):
                     self.set_field_parameter(k,v)
         self.source = source
         if self._field_cuts is not None:

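The new bookkeeping keeps the default center/bulk_velocity/normal objects around and tests by identity (is) rather than equality, so a derived object inheriting field parameters from its source can distinguish "still the untouched default" from "explicitly set to an equal value." A minimal sketch of the same idea outside yt:

    import numpy as np

    class Container(object):
        def __init__(self):
            self._defaults = {'center': np.zeros(3)}
            self.params = dict(self._defaults)   # values are the same objects

        def is_default(self, key):
            # identity, not equality: a user-supplied zeros(3) is a different object
            return key in self._defaults and self.params[key] is self._defaults[key]

    c = Container()
    print(c.is_default('center'))      # True
    c.params['center'] = np.zeros(3)   # equal value, new object
    print(c.is_default('center'))      # False
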
diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -70,5 +70,8 @@
             v1 = proj["Density"].sum()
             v2 = (dd["Density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
-
-
+        # test if projections inherit the field parameters of their data sources
+        dd.set_field_parameter("bulk_velocity", np.array([0,1,2]))
+        proj = pf.h.proj(0, "Density", source=dd)
+        yield assert_equal, dd.field_parameters["bulk_velocity"], \
+                proj.field_parameters["bulk_velocity"]

diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -114,7 +114,12 @@
                 return self.get_range(key.start, key.stop)
             # This will return a sliced up object!
             return TimeSeriesData(self._pre_outputs[key], self.parallel)
-        o = self._pre_outputs[key]
+        try: 
+            o = self._pre_outputs[key]
+        except IndexError:
+            raise InvalidSimulationTimeSeries("Your TimeSeries is empty. \n" + 
+                "Confirm you are running from the simulation source directory.")
+
         if isinstance(o, types.StringTypes):
             o = load(o,**self.kwargs)
         return o

diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -33,6 +33,7 @@
 
 cdef extern from "math.h": 
     double expf(double x) nogil 
+    int isnormal(double x) nogil
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -58,6 +59,7 @@
     cdef np.float64_t bv, dy, dd, tf, rv
     cdef int bin_id
     if dvs[fit.field_id] >= fit.bounds[1] or dvs[fit.field_id] <= fit.bounds[0]: return 0.0
+    if not isnormal(dvs[fit.field_id]): return 0.0
     bin_id = <int> ((dvs[fit.field_id] - fit.bounds[0]) * fit.idbin)
     bin_id = iclip(bin_id, 0, fit.nbins-2)
     dd = dvs[fit.field_id] - (fit.bounds[0] + bin_id * fit.dbin) # x - x0

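The isnormal guard rejects NaN and infinite samples before they can be binned. Note that C99 isnormal() is also false for exact zeros and subnormal values, not just NaN/inf, so those samples are skipped as well. A rough Python equivalent of the predicate, for illustration:

    import math, sys

    def isnormal(x):
        # false for NaN, +/-inf, 0.0 and subnormals, mirroring C99 isnormal()
        return math.isfinite(x) and abs(x) >= sys.float_info.min

    print(isnormal(1.0), isnormal(float('nan')), isnormal(0.0))   # True False False
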
diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -15,6 +15,7 @@
 import pyx
 import numpy as np
 from matplotlib import cm
+import matplotlib.pyplot as plt
 from _mpl_imports import FigureCanvasAgg
 
 from yt.utilities.logger import ytLogger as mylog
@@ -288,9 +289,7 @@
         """
         if isinstance(plot, (PlotWindow, PhasePlot)):
             plot.refresh()
-        else:
-            plot._redraw_image()
-        if isinstance(plot, (VMPlot, PlotWindow)):
+        elif isinstance(plot, (VMPlot, PlotWindow)):
             if isinstance(plot, PlotWindow):
                 data = plot._frb
                 width = plot.width[0]
@@ -344,6 +343,26 @@
                     _ylabel = plot[k].axes.get_ylabel()
             if tickcolor == None:
                 _tickcolor = None
+        elif isinstance(plot, np.ndarray):
+            ax = plt.gca()
+            _xrange = ax.get_xlim()
+            _yrange = ax.get_ylim()
+            _xlog=False
+            _ylog=False
+            if bare_axes:
+                _xlabel = ""
+                _ylabel = ""
+            else:
+                if xlabel != None:
+                    _xlabel = xlabel
+                else:
+                    _xlabel = ax.get_xlabel()
+                if ylabel != None:
+                    _ylabel = ylabel
+                else:
+                    _ylabel = ax.get_ylabel()
+            if tickcolor == None:
+                _tickcolor = None
         else:
             _xrange = plot._axes.get_xlim()
             _yrange = plot._axes.get_ylim()
@@ -461,6 +480,13 @@
             # Remove colorbar
             _p1 = plot._figure
             _p1.delaxes(_p1.axes[1])
+        elif isinstance(plot, np.ndarray):
+            fig = plt.figure()
+            iplot = plt.figimage(plot)
+            _p1 =  iplot.figure
+            _p1.set_size_inches(self.figsize[0], self.figsize[1])
+            ax = plt.gca()
+            _p1.add_axes(ax)
         else:
             raise RuntimeError("Unknown plot type")
 
@@ -855,7 +881,7 @@
         
 #=============================================================================
 
-    def save_fig(self, filename="test", format="eps"):
+    def save_fig(self, filename="test", format="eps", resolution=250):
         r"""Saves current figure to a file.
 
         Parameters
@@ -875,6 +901,10 @@
             self.canvas.writeEPSfile(filename)
         elif format == "pdf":
             self.canvas.writePDFfile(filename)
+        elif format == "png":
+             self.canvas.writeGSfile(filename+".png", "png16m", resolution=resolution)
+        elif format == "jpg":
+             self.canvas.writeGSfile(filename+".jpeg", "jpeg", resolution=resolution)
         else:
             raise RuntimeError("format %s unknown." % (format))
             

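writeGSfile is PyX's Ghostscript-backed rasterizer, so save_fig can now emit bitmaps directly; "png16m" is Ghostscript's 24-bit PNG device and "jpeg" its JPEG device. Hypothetical usage, assuming a DualEPS multiplot has already been assembled into d:

    d.save_fig("figure", format="png", resolution=250)   # writes figure.png at 250 dpi
    d.save_fig("figure", format="jpg")                   # writes figure.jpeg
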
diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -28,6 +28,8 @@
     sec_per_day, sec_per_hr
 from yt.visualization.image_writer import apply_colormap
 
+from matplotlib.colors import colorConverter
+
 import _MPL
 
 callback_registry = {}
@@ -337,20 +339,25 @@
 class GridBoundaryCallback(PlotCallback):
     """
     annotate_grids(alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
-                 min_level=None, max_level=None, cmap='B-W LINEAR_r'):
+                 min_level=None, max_level=None, cmap='B-W LINEAR_r', edgecolors=None,
+                 linewidth=1.0):
 
     Draws grids on an existing PlotWindow object.
     Adds grid boundaries to a plot, optionally with alpha-blending. By default, 
     colors different levels of grids with different colors going from white to
-    black, but you can change to any arbitrary colormap with cmap keyword 
-    (or all black cells for all levels with cmap=None).  Cuttoff for display is at 
-    min_pix wide. draw_ids puts the grid id in the corner of the grid. 
+    black, but you can change to any arbitrary colormap with cmap keyword, to all black
+    grid edges for all levels with cmap=None and edgecolors=None, or to an arbitrary single
+    color for grid edges with edgecolors='YourChosenColor' defined in any of the standard ways
+    (e.g., edgecolors='white', edgecolors='r', edgecolors='#00FFFF', or edgecolor='0.3', where
+    the last is a float in 0-1 scale indicating gray).
+    Note that setting edgecolors overrides cmap if you have both set to non-None values.
+    Cutoff for display is at min_pix wide. draw_ids puts the grid id in the corner of the grid. 
     (Not so great in projections...).  One can set min and maximum level of
-    grids to display.
+    grids to display, and can change the linewidth of the displayed grids.
     """
     _type_name = "grids"
     def __init__(self, alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
-                 min_level=None, max_level=None, cmap='B-W LINEAR_r'):
+                 min_level=None, max_level=None, cmap='B-W LINEAR_r', edgecolors = None, linewidth=1.0):
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
@@ -359,7 +366,9 @@
         self.periodic = periodic
         self.min_level = min_level
         self.max_level = max_level
+        self.linewidth = linewidth
         self.cmap = cmap
+        self.edgecolors = edgecolors
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -399,13 +408,17 @@
                        ( levels >= min_level) & \
                        ( levels <= max_level)
 
-            if self.cmap is not None: 
-                edgecolors = apply_colormap(levels[(levels <= max_level) & (levels >= min_level)]*1.0,
-                                  color_bounds=[0,plot.data.pf.h.max_level],
-                                  cmap_name=self.cmap)[0,:,:]*1.0/255.
-                edgecolors[:,3] = self.alpha
-            else:
-                edgecolors = (0.0,0.0,0.0,self.alpha)
+            # Grids can either be set by edgecolors OR a colormap.
+            if self.edgecolors is not None: 
+                edgecolors = colorConverter.to_rgba(self.edgecolors, alpha=self.alpha)
+            else:  # use colormap if not explicitly overridden by edgecolors
+                if self.cmap is not None: 
+                    edgecolors = apply_colormap(levels[(levels <= max_level) & (levels >= min_level)]*1.0,
+                                                color_bounds=[0,plot.data.pf.h.max_level],
+                                                cmap_name=self.cmap)[0,:,:]*1.0/255.
+                    edgecolors[:,3] = self.alpha
+                else:
+                    edgecolors = (0.0,0.0,0.0,self.alpha)
 
             if visible.nonzero()[0].size == 0: continue
             verts = np.array(
@@ -414,7 +427,7 @@
             verts=verts.transpose()[visible,:,:]
             grid_collection = matplotlib.collections.PolyCollection(
                 verts, facecolors="none",
-                edgecolors=edgecolors)
+                edgecolors=edgecolors, linewidth=self.linewidth)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
 

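With the new keywords, the grid-edge color can be pinned to any single Matplotlib color spec (which then takes precedence over cmap) and the stroke width is adjustable. Hypothetical calls on an existing plot window p:

    p.annotate_grids(edgecolors='white', linewidth=2.0, alpha=0.5)   # one fixed color
    p.annotate_grids(cmap=None, edgecolors=None)                     # all-black edges, as before
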
diff -r af74e7e1bf6f0ba11ca0e2eb58ed795c2ce1111e -r 5112b4710de5605fe8e4bc18a18e8251c1904626 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -433,7 +433,7 @@
         if field_name is None:
             field_name = r'$\rm{'+field+r'}$'
         elif field_name.find('$') == -1:
-            field_name = r'$\rm{'+field+r'}$'
+            field_name = r'$\rm{'+field_name+r'}$'
         if units is None or units == '':
             label = field_name
         else:
@@ -574,13 +574,13 @@
         if field_name is None:
             field_name = r'$\rm{'+field+r'}$'
         elif field_name.find('$') == -1:
-            field_name = r'$\rm{'+field+r'}$'
+            field_name = r'$\rm{'+field_name+r'}$'
         if units is None or units == '':
             label = field_name
         else:
             label = field_name+r'$\/\/('+units+r')$'
         return label
-        
+
     def _get_field_log(self, field_z, profile):
         pf = profile.data_source.pf
         zfi = pf.field_info[field_z]


https://bitbucket.org/yt_analysis/yt/commits/a80d973e5b13/
Changeset:   a80d973e5b13
Branch:      yt
User:        atmyers
Date:        2014-05-23 21:13:01
Summary:     Merged yt_analysis/yt into yt
Affected #:  71 files



https://bitbucket.org/yt_analysis/yt/commits/29e7f9e4ea9a/
Changeset:   29e7f9e4ea9a
Branch:      yt
User:        atmyers
Date:        2014-06-06 22:59:16
Summary:     Merged yt_analysis/yt into yt
Affected #:  2 files

diff -r a80d973e5b1334d37c8c89da28ad2321a7e6e4d5 -r 29e7f9e4ea9a563d5a2f2e8c116861fb6678a940 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -289,7 +289,7 @@
         """
         if isinstance(plot, (PlotWindow, PhasePlot)):
             plot.refresh()
-        elif isinstance(plot, (VMPlot, PlotWindow)):
+        if isinstance(plot, (VMPlot, PlotWindow)):
             if isinstance(plot, PlotWindow):
                 data = plot._frb
                 width = plot.width[0]

diff -r a80d973e5b1334d37c8c89da28ad2321a7e6e4d5 -r 29e7f9e4ea9a563d5a2f2e8c116861fb6678a940 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1069,7 +1069,8 @@
     """
     annotate_particles(width, p_size=1.0, col='k', marker='o', stride=1.0,
                        ptype=None, stars_only=False, dm_only=False,
-                       minimum_mass=None, alpha=1.0)
+                       minimum_mass=None, alpha=1.0, min_star_age=None,
+                       max_star_age=None)
 
     Adds particle positions, based on a thick slab along *axis* with a
     *width* along the line of sight.  *p_size* controls the number of
@@ -1077,14 +1078,16 @@
     restrict plotted particles to only those that are of a given type.
     *minimum_mass* will require that the particles be of a given mass,
     calculated via ParticleMassMsun, to be plotted. *alpha* determines
-    each particle's opacity.
+    each particle's opacity. If stars are plotted, min_star_age and
+    max_star_age filter them (in years).
     """
     _type_name = "particles"
     region = None
     _descriptor = None
     def __init__(self, width, p_size=1.0, col='k', marker='o', stride=1.0,
                  ptype=None, stars_only=False, dm_only=False,
-                 minimum_mass=None, alpha=1.0):
+                 minimum_mass=None, alpha=1.0, min_star_age=None,
+                 max_star_age=None):
         PlotCallback.__init__(self)
         self.width = width
         self.p_size = p_size
@@ -1096,10 +1099,12 @@
         self.dm_only = dm_only
         self.minimum_mass = minimum_mass
         self.alpha = alpha
+        self.min_star_age = min_star_age
+        self.max_star_age = max_star_age
 
     def __call__(self, plot):
         data = plot.data
-        # we construct a recantangular prism
+        # we construct a rectangular prism
         x0, x1 = plot.xlim
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
@@ -1121,6 +1126,16 @@
         if self.minimum_mass is not None:
             gg &= (reg["ParticleMassMsun"] >= self.minimum_mass)
             if gg.sum() == 0: return
+        if self.min_star_age is not None:
+            current_time = plot.pf.current_time
+            age_in_years = (current_time - reg["creation_time"]) * (plot.pf['Time'] / 3.155e7)
+            gg &= (age_in_years >= self.min_star_age)
+            if gg.sum() == 0: return
+        if self.max_star_age is not None:
+            current_time = plot.pf.current_time
+            age_in_years = (current_time - reg["creation_time"]) * (plot.pf['Time'] / 3.155e7)
+            gg &= (age_in_years <= self.max_star_age)
+            if gg.sum() == 0: return
         plot._axes.hold(True)
         px, py = self.convert_to_plot(plot,
                     [reg[field_x][gg][::self.stride],

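The age cuts convert creation_time to years through the dataset's time conversion (3.155e7 being roughly the number of seconds in a year) and then mask the particle selection the same way the mass cut does. Hypothetical usage, assuming p is a slice plot of a dataset that records creation_time, with the slab width in code units:

    # keep only stars between 10 Myr and 100 Myr old in a thin slab
    p.annotate_particles(0.02, stars_only=True,
                         min_star_age=1.0e7, max_star_age=1.0e8)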

https://bitbucket.org/yt_analysis/yt/commits/2ec77aca4ee8/
Changeset:   2ec77aca4ee8
Branch:      yt
User:        atmyers
Date:        2014-07-01 00:51:17
Summary:     Merged yt_analysis/yt into yt
Affected #:  4 files

diff -r 29e7f9e4ea9a563d5a2f2e8c116861fb6678a940 -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -392,13 +392,9 @@
             pf = load(my_segment['filename'])
 
             if self.near_redshift == self.far_redshift:
-                h_vel = cm_per_km * pf.units['mpc'] * \
-                  vector_length(my_segment['start'], my_segment['end']) * \
-                  self.cosmology.HubbleConstantNow * \
-                  self.cosmology.ExpansionFactor(my_segment['redshift'])
-                next_redshift = my_segment["redshift"] + \
-                  np.sqrt((1. + h_vel / speed_of_light_cgs) /
-                          (1. - h_vel / speed_of_light_cgs)) + 1.
+                next_redshift = my_segment["redshift"] - \
+                    self._deltaz_forward(my_segment["redshift"], 
+                                         pf.units["mpc"] * my_segment["traversal_box_fraction"])
             elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:

diff -r 29e7f9e4ea9a563d5a2f2e8c116861fb6678a940 -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 yt/utilities/definitions.py
--- a/yt/utilities/definitions.py
+++ b/yt/utilities/definitions.py
@@ -15,7 +15,7 @@
 
 from .physical_constants import \
    mpc_per_mpc, kpc_per_mpc, pc_per_mpc, au_per_mpc, rsun_per_mpc, \
-   miles_per_mpc, km_per_mpc, cm_per_mpc, sec_per_Gyr, sec_per_Myr, \
+   miles_per_mpc, km_per_mpc, m_per_mpc, cm_per_mpc, sec_per_Gyr, sec_per_Myr, \
    sec_per_year, sec_per_day
 
 # The number of levels we expect to have at most
@@ -44,6 +44,7 @@
                   'rsun'  : rsun_per_mpc,
                   'miles' : miles_per_mpc,
                   'km'    : km_per_mpc,
+                  'm'     : m_per_mpc,
                   'cm'    : cm_per_mpc}
 
 # Nicely formatted versions of common length units

diff -r 29e7f9e4ea9a563d5a2f2e8c116861fb6678a940 -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -48,6 +48,7 @@
 mpc_per_rsun  = 2.253962e-14
 mpc_per_miles = 5.21552871e-20
 mpc_per_km    = 3.24077929e-20
+mpc_per_m     = 3.24077929e-23
 mpc_per_cm    = 3.24077929e-25
 kpc_per_cm    = mpc_per_cm / mpc_per_kpc
 km_per_pc     = 3.08567758e13
@@ -63,6 +64,7 @@
 rsun_per_mpc  = 1.0 / mpc_per_rsun
 miles_per_mpc = 1.0 / mpc_per_miles
 km_per_mpc    = 1.0 / mpc_per_km
+m_per_mpc     = 1.0 / mpc_per_m
 cm_per_mpc    = 1.0 / mpc_per_cm
 cm_per_kpc    = 1.0 / kpc_per_cm
 cm_per_km     = 1.0 / km_per_cm

diff -r 29e7f9e4ea9a563d5a2f2e8c116861fb6678a940 -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -512,7 +512,7 @@
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
-    for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
+    for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'm', 'cm']:
         vv = v*pf[unit]
         if vv < max_nu and vv > 1.0:
             good_u = unit

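get_smallest_appropriate_unit keeps the largest unit in which the value still exceeds 1, so adding 'm' fills the five-orders-of-magnitude gap between km and cm. Worked example: a 500 m length is 0.5 km, which fails the > 1.0 test, so before this change the loop fell through to 'cm' and reported 50000 cm; with 'm' in the list it now reports 500 m.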

https://bitbucket.org/yt_analysis/yt/commits/b218d1ab004c/
Changeset:   b218d1ab004c
Branch:      yt
User:        atmyers
Date:        2014-08-10 20:10:54
Summary:     merging
Affected #:  10 files

diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b yt/frontends/charm/api.py
--- /dev/null
+++ b/yt/frontends/charm/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.charm
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      CharmGrid, \
+      CharmHierarchy, \
+      CharmStaticOutput
+
+from .fields import \
+      CharmFieldInfo, \
+      add_charm_field
+
+from .io import \
+      IOHandlerCharmHDF5

diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b yt/frontends/charm/data_structures.py
--- /dev/null
+++ b/yt/frontends/charm/data_structures.py
@@ -0,0 +1,341 @@
+"""
+Data structures for Charm.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import re
+import os
+import weakref
+import numpy as np
+
+from collections import \
+     defaultdict
+from string import \
+     strip, \
+     rstrip
+from stat import \
+     ST_CTIME
+
+from .definitions import \
+     charm2enzoDict, \
+     yt2charmFieldsDict, \
+     parameterDict \
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+     AMRGridPatch
+from yt.data_objects.hierarchy import \
+     AMRHierarchy
+from yt.data_objects.static_output import \
+     StaticOutput
+from yt.utilities.definitions import \
+     mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     parallel_root_only
+from yt.utilities.io_handler import \
+    io_registry
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import \
+    CharmFieldInfo, Charm2DFieldInfo, Charm1DFieldInfo, \
+    add_charm_field, add_charm_2d_field, add_charm_1d_field, \
+    KnownCharmFields
+
+class CharmGrid(AMRGridPatch):
+    _id_offset = 0
+    __slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level, start, stop):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.ActiveDimensions = stop - start + 1
+
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+
+        """
+        if self.start_index != None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return np.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+class CharmHierarchy(AMRHierarchy):
+
+    grid = CharmGrid
+    _data_file = None
+
+    def __init__(self,pf,data_style='charm_hdf5'):
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.data_style = data_style
+
+        if pf.dimensionality == 1:
+            self.data_style = "charm1d_hdf5"
+        if pf.dimensionality == 2:
+            self.data_style = "charm2d_hdf5"
+
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = os.path.abspath(
+            self.parameter_file.parameter_filename)
+        self.directory = pf.fullpath
+        self._handle = pf._handle
+
+        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
+        AMRHierarchy.__init__(self,pf,data_style)
+        self._read_particles()
+
+    def _read_particles(self):
+        
+        self.num_particles = 0
+        particles_per_grid = []
+        for key, val in self._handle.items():
+            if key.startswith('level'):
+                level_particles = val['particles:offsets'][:]
+                self.num_particles += level_particles.sum()
+                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
+
+        for i, grid in enumerate(self.grids):
+            self.grids[i].NumberOfParticles = particles_per_grid[i]
+            self.grid_particle_count[i] = particles_per_grid[i]
+
+        assert(self.num_particles == self.grid_particle_count.sum())
+
+    def _detect_fields(self):
+        self.field_list = []
+        for key, val in self._handle.attrs.items():
+            if key.startswith("component"):
+                self.field_list.append(val)
+          
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._handle[lev]['Processors'].len()
+
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.parameter_file.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class CharmStaticOutput(StaticOutput):
+    _hierarchy_class = CharmHierarchy
+    _fieldinfo_fallback = CharmFieldInfo
+    _fieldinfo_known = KnownCharmFields
+
+    def __init__(self, filename, data_style='charm_hdf5',
+                 storage_filename = None, ini_filename = None):
+        self._handle = h5py.File(filename,'r')
+        self.current_time = self._handle['level_0'].attrs['time']
+        self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
+        StaticOutput.__init__(self,filename,data_style)
+        self.storage_filename = storage_filename
+        self.cosmological_simulation = False
+
+        # These are parameters that I very much wish to get rid of.
+        self.parameters["HydroMethod"] = 'charm' # always PPM DE
+        self.parameters["DualEnergyFormalism"] = 0 
+        self.parameters["EOSType"] = -1 # default
+
+    def __del__(self):
+        self._handle.close()
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        seconds = 1 #self["Time"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = seconds / sec_conversion[unit]
+        for key in yt2charmFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    def _parse_parameter_file(self):
+        
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_left_edge = self.__calc_left_edge()
+        self.domain_right_edge = self.__calc_right_edge()
+        self.domain_dimensions = self.__calc_domain_dimensions()
+
+        if self.dimensionality == 1:
+            self._fieldinfo_fallback = Charm1DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self._fieldinfo_fallback = Charm2DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+        
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self.periodicity = (True,) * self.dimensionality
+
+    def __calc_left_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        return LE
+
+    def __calc_right_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return RE
+
+    def __calc_domain_dimensions(self):
+        fileh = self._handle
+        D = self.dimensionality
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return R_index - L_index
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = "Charm_global" in fileh["/"]
+            fileh.close()
+            return valid
+        except:
+            pass
+        return False
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)

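In the Chombo-style layout this frontend assumes, prob_domain holds the level-0 integer index bounds (lo_i, lo_j, lo_k, hi_i, hi_j, hi_k), and the physical domain follows from the level-0 cell width. A worked example under that assumption, for dx0 = 0.25 and prob_domain = (0, 0, 0, 7, 7, 7):

    left edge  = 0.25 * (0, 0, 0)             = (0.0, 0.0, 0.0)
    right edge = 0.25 * (7 + 1, 7 + 1, 7 + 1) = (2.0, 2.0, 2.0)
    dimensions = (7 + 1) - (0, 0, 0)          = 8 cells along each axis
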
diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b yt/frontends/charm/definitions.py
--- /dev/null
+++ b/yt/frontends/charm/definitions.py
@@ -0,0 +1,54 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                                 }
+
+charm2enzoDict = {"GAMMA": "Gamma",
+                  "Ref_ratio": "RefineBy"
+                                    }
+
+yt2charmFieldsDict = {}
+charm2ytFieldsDict = {}
+

diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b yt/frontends/charm/fields.py
--- /dev/null
+++ b/yt/frontends/charm/fields.py
@@ -0,0 +1,150 @@
+"""
+Charm-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+import numpy as np
+
+CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = CharmFieldInfo.add_field
+
+KnownCharmFields = FieldInfoContainer()
+add_charm_field = KnownCharmFields.add_field
+
+add_charm_field("potential", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("potential")],
+                units=r"")
+
+add_charm_field("density", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("density")],
+                units=r"")
+
+add_charm_field("gravitational_field_x", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_x")],
+                units=r"")
+
+add_charm_field("gravitational_field_y", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_y")],
+                units=r"")
+
+add_charm_field("gravitational_field_z", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_z")],
+                units=r"")
+
+def _Density(field, data):
+    return data["density"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+        
+    return _Particles
+
+_particle_field_list = ["mass",
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "velocity_x",
+                        "velocity_y",
+                        "velocity_z",
+                        "acceleration_x",
+                        "acceleration_y",
+                        "acceleration_z"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)
+
+#do overrides for 2D
+
+Charm2DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_2d_field = Charm2DFieldInfo.add_field
+
+def _gravitational_field_z(field, data):
+    return np.zeros(data['gravitational_field_x'].shape,
+                    dtype='float64')
+add_charm_2d_field("gravitational_field_z", function=_gravitational_field_z)
+
+def _particle_position_z(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+add_charm_2d_field("particle_position_z", function=_particle_position_z)
+
+def _particle_velocity_z(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+add_charm_2d_field("particle_velocity_z", function=_particle_velocity_z)
+
+def _particle_acceleration_z(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+add_charm_2d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+#do overrides for 1D
+
+Charm1DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_1d_field = Charm1DFieldInfo.add_field
+
+def _gravitational_field_y(field, data):
+    return np.zeros(data['gravitational_field_y'].shape,
+                    dtype='float64')
+
+def _particle_position_y(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+
+def _particle_velocity_y(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+
+def _particle_acceleration_y(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+
+add_charm_1d_field("gravitational_field_z", function=_gravitational_field_z)
+add_charm_1d_field("gravitational_field_y", function=_gravitational_field_y)
+
+add_charm_1d_field("particle_position_z", function=_particle_position_z)
+add_charm_1d_field("particle_velocity_z", function=_particle_velocity_z)
+add_charm_1d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+add_charm_1d_field("particle_position_y", function=_particle_position_y)
+add_charm_1d_field("particle_velocity_y", function=_particle_velocity_y)
+add_charm_1d_field("particle_acceleration_y", function=_particle_acceleration_y)
\ No newline at end of file

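particle_func is a closure factory: each generated _Particles function captures its field name and defers to the hierarchy's IO handler, returning an empty array on grids with no particles so the derived fields stay well-defined everywhere. The same pattern in miniature (illustrative names only):

    def make_reader(p_field):
        def _reader(data):
            # p_field is captured per call, so each field gets its own reader
            return "reading %s from %s" % (p_field, data)
        return _reader

    read_mass = make_reader('mass')
    print(read_mass('grid_0'))   # reading mass from grid_0
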
diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b yt/frontends/charm/io.py
--- /dev/null
+++ b/yt/frontends/charm/io.py
@@ -0,0 +1,127 @@
+"""
+The data-file handling functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import h5py
+import os
+import re
+import numpy as np
+
+from yt.utilities.io_handler import \
+           BaseIOHandler
+
+class IOHandlerCharmHDF5(BaseIOHandler):
+    _data_style = "charm_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'position_z': 2,
+                                      'velocity_x': 3,
+                                      'velocity_y': 4,
+                                      'velocity_z': 5,
+                                      'acceleration_x': 6,
+                                      'acceleration_y': 7,
+                                      'acceleration_z': 8,
+                                      'mass': 9}
+
+    _field_dict = None
+    @property
+    def field_dict(self):
+        if self._field_dict is not None:
+            return self._field_dict
+        field_dict = {}
+        for key, val in self._handle.attrs.items():
+            if key.startswith('component_'):
+                comp_number = int(re.match('component_(\d)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._field_dict = field_dict
+        return self._field_dict
+        
+    def _read_field_names(self, grid):
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        return [c[1] for c in self._handle['/'].attrs.items()[-ncomp-1:-1]]
+    
+    def _read_data(self,grid,field):
+
+        lstring = 'level_%i' % grid.Level
+        lev = self._handle[lstring]
+        dims = grid.ActiveDimensions
+        boxsize = dims.prod()
+        
+        grid_offset = lev[self._offset_string][grid._level_id]
+        start = grid_offset+self.field_dict[field]*boxsize
+        stop = start + boxsize
+        data = lev[self._data_string][start:stop]
+        
+        return data.reshape(dims, order='F')
+
+    def _read_particles(self, grid, name):
+
+        field_index = self._particle_field_index[name]
+        lev = 'level_%s' % grid.Level
+
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
+        items_per_particle = len(self._particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+
+        # convert between the global grid id and the id on this level            
+        grid_levels = np.array([g.Level for g in self.pf.h.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
+        return data[field_index::items_per_particle]
+
+class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm2d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'velocity_x': 2,
+                                      'velocity_y': 3,
+                                      'acceleration_x': 4,
+                                      'acceleration_y': 5,
+                                      'mass': 6}
+
+
+class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm1d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'velocity_x': 1,
+                                      'acceleration_x': 2,
+                                      'mass': 3}
\ No newline at end of file

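The particle read in _read_particles above reduces to a cumulative-offset
plus stride-indexing scheme: per-grid particle counts become element
offsets into the flat 'particles:data' array, and a single field is then
pulled out of each interleaved per-particle record. A minimal,
self-contained sketch with synthetic numpy arrays (the names here are
illustrative, not part of the yt API):

    import numpy as np

    # Three grids on one level; each particle record interleaves
    # 10 items (position, velocity, acceleration, mass).
    items_per_particle = 10
    particles_per_grid = np.array([2, 0, 3])
    data = np.arange(particles_per_grid.sum() * items_per_particle,
                     dtype='float64')   # stands in for 'particles:data'

    # One offset per grid boundary into the flat array.
    offsets = items_per_particle * np.cumsum(particles_per_grid)
    offsets = np.append([0], offsets).astype(np.int64)

    def read_particle_field(level_id, field_index):
        # Slice out this grid's particles, then stride over the
        # interleaved record to pick a single component.
        chunk = data[offsets[level_id]:offsets[level_id + 1]]
        return chunk[field_index::items_per_particle]

    print(read_particle_field(2, 9))   # 'mass' entries of the third grid
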
diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b yt/frontends/charm/setup.py
--- /dev/null
+++ b/yt/frontends/charm/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('charm', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

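For context, numpy.distutils picks up a configuration() function like the
one above when a parent package registers the subpackage. A minimal sketch
of the parent side (the 'frontends' package name is inferred from the
path; the surrounding layout is otherwise assumed):

    # Parent-side setup.py sketch under numpy.distutils.
    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('frontends', parent_package, top_path)
        config.add_subpackage('charm')   # runs yt/frontends/charm/setup.py
        return config

    if __name__ == '__main__':
        from numpy.distutils.core import setup
        setup(configuration=configuration)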

https://bitbucket.org/yt_analysis/yt/commits/3feaff518124/
Changeset:   3feaff518124
Branch:      yt
User:        atmyers
Date:        2014-08-10 20:17:52
Summary:     removing CHARM stuff from this branch
Affected #:  7 files

diff -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b -r 3feaff518124093579c9c131992d1619d0278e9d yt/frontends/charm/api.py
--- a/yt/frontends/charm/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.charm
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CharmGrid, \
-      CharmHierarchy, \
-      CharmStaticOutput
-
-from .fields import \
-      CharmFieldInfo, \
-      add_charm_field
-
-from .io import \
-      IOHandlerCharmHDF5

diff -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b -r 3feaff518124093579c9c131992d1619d0278e9d yt/frontends/charm/data_structures.py
--- a/yt/frontends/charm/data_structures.py
+++ /dev/null
@@ -1,341 +0,0 @@
-"""
-Data structures for Charm.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import re
-import os
-import weakref
-import numpy as np
-
-from collections import \
-     defaultdict
-from string import \
-     strip, \
-     rstrip
-from stat import \
-     ST_CTIME
-
-from .definitions import \
-     charm2enzoDict, \
-     yt2charmFieldsDict, \
-     parameterDict \
-
-from yt.funcs import *
-from yt.data_objects.grid_patch import \
-     AMRGridPatch
-from yt.data_objects.hierarchy import \
-     AMRHierarchy
-from yt.data_objects.static_output import \
-     StaticOutput
-from yt.utilities.definitions import \
-     mpc_conversion, sec_conversion
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_root_only
-from yt.utilities.io_handler import \
-    io_registry
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-from .fields import \
-    CharmFieldInfo, Charm2DFieldInfo, Charm1DFieldInfo, \
-    add_charm_field, add_charm_2d_field, add_charm_1d_field, \
-    KnownCharmFields
-
-class CharmGrid(AMRGridPatch):
-    _id_offset = 0
-    __slots__ = ["_level_id", "stop_index"]
-    def __init__(self, id, hierarchy, level, start, stop):
-        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
-                              hierarchy = hierarchy)
-        self.Parent = []
-        self.Children = []
-        self.Level = level
-        self.ActiveDimensions = stop - start + 1
-
-    def get_global_startindex(self):
-        """
-        Return the integer starting index for each dimension at the current
-        level.
-
-        """
-        if self.start_index != None:
-            return self.start_index
-        if self.Parent == []:
-            iLE = self.LeftEdge - self.pf.domain_left_edge
-            start_index = iLE / self.dds
-            return np.rint(start_index).astype('int64').ravel()
-        pdx = self.Parent[0].dds
-        start_index = (self.Parent[0].get_global_startindex()) + \
-            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
-        return self.start_index
-
-    def _setup_dx(self):
-        # has already been read in and stored in hierarchy
-        self.dds = self.hierarchy.dds_list[self.Level]
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-class CharmHierarchy(AMRHierarchy):
-
-    grid = CharmGrid
-    _data_file = None
-
-    def __init__(self,pf,data_style='charm_hdf5'):
-        self.domain_left_edge = pf.domain_left_edge
-        self.domain_right_edge = pf.domain_right_edge
-        self.data_style = data_style
-
-        if pf.dimensionality == 1:
-            self.data_style = "charm1d_hdf5"
-        if pf.dimensionality == 2:
-            self.data_style = "charm2d_hdf5"
-
-        self.field_indexes = {}
-        self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
-        self.hierarchy_filename = os.path.abspath(
-            self.parameter_file.parameter_filename)
-        self.directory = pf.fullpath
-        self._handle = pf._handle
-
-        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
-        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
-        AMRHierarchy.__init__(self,pf,data_style)
-        self._read_particles()
-
-    def _read_particles(self):
-        
-        self.num_particles = 0
-        particles_per_grid = []
-        for key, val in self._handle.items():
-            if key.startswith('level'):
-                level_particles = val['particles:offsets'][:]
-                self.num_particles += level_particles.sum()
-                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
-
-        for i, grid in enumerate(self.grids):
-            self.grids[i].NumberOfParticles = particles_per_grid[i]
-            self.grid_particle_count[i] = particles_per_grid[i]
-
-        assert(self.num_particles == self.grid_particle_count.sum())
-
-    def _detect_fields(self):
-        self.field_list = []
-        for key, val in self._handle.attrs.items():
-            if key.startswith("component"):
-                self.field_list.append(val)
-          
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        AMRHierarchy._setup_classes(self, dd)
-        self.object_types.sort()
-
-    def _count_grids(self):
-        self.num_grids = 0
-        for lev in self._levels:
-            self.num_grids += self._handle[lev]['Processors'].len()
-
-    def _parse_hierarchy(self):
-        f = self._handle # shortcut
-
-        grids = []
-        self.dds_list = []
-        i = 0
-        D = self.parameter_file.dimensionality
-        for lev_index, lev in enumerate(self._levels):
-            level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            try:
-                boxes = f[lev]['boxes'].value
-            except KeyError:
-                boxes = f[lev]['particles:boxes'].value
-            dx = f[lev].attrs['dx']
-            self.dds_list.append(dx * np.ones(3))
-
-            if D == 1:
-                self.dds_list[lev_index][1] = 1.0
-                self.dds_list[lev_index][2] = 1.0
-
-            if D == 2:
-                self.dds_list[lev_index][2] = 1.0
-
-            for level_id, box in enumerate(boxes):
-                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
-                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
-                
-                if D == 1:
-                    si = np.concatenate((si, [0.0, 0.0]))
-                    ei = np.concatenate((ei, [0.0, 0.0]))
-
-                if D == 2:
-                    si = np.concatenate((si, [0.0]))
-                    ei = np.concatenate((ei, [0.0]))
-
-                pg = self.grid(len(grids),self,level=level_number,
-                               start = si, stop = ei)
-                grids.append(pg)
-                grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
-                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
-                self.grid_particle_count[i] = 0
-                self.grid_dimensions[i] = ei - si + 1
-                i += 1
-        self.grids = np.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
-
-    def _populate_grid_objects(self):
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-
-        for g in self.grids:
-            g.Children = self._get_grid_children(g)
-            for g1 in g.Children:
-                g1.Parent.append(g)
-        self.max_level = self.grid_levels.max()
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
-
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
-class CharmStaticOutput(StaticOutput):
-    _hierarchy_class = CharmHierarchy
-    _fieldinfo_fallback = CharmFieldInfo
-    _fieldinfo_known = KnownCharmFields
-
-    def __init__(self, filename, data_style='charm_hdf5',
-                 storage_filename = None, ini_filename = None):
-        self._handle = h5py.File(filename,'r')
-        self.current_time = self._handle['level_0'].attrs['time']
-        self.ini_filename = ini_filename
-        self.fullplotdir = os.path.abspath(filename)
-        StaticOutput.__init__(self,filename,data_style)
-        self.storage_filename = storage_filename
-        self.cosmological_simulation = False
-
-        # These are parameters that I very much wish to get rid of.
-        self.parameters["HydroMethod"] = 'charm' # always PPM DE
-        self.parameters["DualEnergyFormalism"] = 0 
-        self.parameters["EOSType"] = -1 # default
-
-    def __del__(self):
-        self._handle.close()
-
-    def _set_units(self):
-        """
-        Generates the conversion to various physical _units based on the parameter file
-        """
-        self.units = {}
-        self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self._setup_nounits_units()
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        seconds = 1 #self["Time"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2charmFieldsDict:
-            self.conversion_factors[key] = 1.0
-
-    def _setup_nounits_units(self):
-        z = 0
-        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
-        if not self.has_key("TimeUnits"):
-            mylog.warning("No time units.  Setting 1.0 = 1 second.")
-            self.conversion_factors["Time"] = 1.0
-        for unit in mpc_conversion.keys():
-            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
-
-    def _parse_parameter_file(self):
-        
-        self.unique_identifier = \
-                               int(os.stat(self.parameter_filename)[ST_CTIME])
-        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
-        self.domain_left_edge = self.__calc_left_edge()
-        self.domain_right_edge = self.__calc_right_edge()
-        self.domain_dimensions = self.__calc_domain_dimensions()
-
-        if self.dimensionality == 1:
-            self._fieldinfo_fallback = Charm1DFieldInfo
-            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
-            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
-            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
-
-        if self.dimensionality == 2:
-            self._fieldinfo_fallback = Charm2DFieldInfo
-            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
-            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
-            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
-        
-        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True,) * self.dimensionality
-
-    def __calc_left_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        D = self.dimensionality
-        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
-        return LE
-
-    def __calc_right_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        D = self.dimensionality
-        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
-        return RE
-
-    def __calc_domain_dimensions(self):
-        fileh = self._handle
-        D = self.dimensionality
-        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
-        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
-        return R_index - L_index
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            valid = "Charm_global" in fileh["/"]
-            fileh.close()
-            return valid
-        except:
-            pass
-        return False
-
-    @parallel_root_only
-    def print_key_parameters(self):
-        for a in ["current_time", "domain_dimensions", "domain_left_edge",
-                  "domain_right_edge"]:
-            if not hasattr(self, a):
-                mylog.error("Missing %s in parameter file definition!", a)
-                continue
-            v = getattr(self, a)
-            mylog.info("Parameters: %-25s = %s", a, v)

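The three __calc_* helpers at the bottom of the removed class all decode
the same 'prob_domain' attribute: the first D entries are the inclusive
low cell indices, the next D the inclusive high indices, and multiplying
by the level-0 cell width dx converts indices to physical edges. A short
sketch with made-up values (a 32^3 domain spanning the unit cube):

    import numpy as np

    prob_domain = np.array([0, 0, 0, 31, 31, 31])   # lo_ijk, hi_ijk
    dx0 = 1.0 / 32
    D = 3

    L_index = prob_domain[0:D]
    R_index = prob_domain[D:] + 1        # hi indices are inclusive
    left_edge = dx0 * L_index            # -> [0. 0. 0.]
    right_edge = dx0 * R_index           # -> [1. 1. 1.]
    dims = R_index - L_index             # -> [32 32 32]
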
diff -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b -r 3feaff518124093579c9c131992d1619d0278e9d yt/frontends/charm/definitions.py
--- a/yt/frontends/charm/definitions.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-Various definitions for various other modules and routines
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                                 }
-
-charm2enzoDict = {"GAMMA": "Gamma",
-                  "Ref_ratio": "RefineBy"
-                                    }
-
-yt2charmFieldsDict = {}
-charm2ytFieldsDict = {}
-

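The parameterDict above is a name-to-type table, so a raw string read from
a parameter file can be coerced by calling the mapped type directly. A
self-contained sketch (the file contents are invented):

    parameterDict = {"Gamma": float, "RefineBy": int, "MetaDataString": str}

    raw = {"Gamma": "1.6667", "RefineBy": "2", "MetaDataString": "run_42"}
    parameters = dict((name, parameterDict[name](value))
                      for name, value in raw.items()
                      if name in parameterDict)
    # parameters == {'Gamma': 1.6667, 'RefineBy': 2,
    #                'MetaDataString': 'run_42'}
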
diff -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b -r 3feaff518124093579c9c131992d1619d0278e9d yt/frontends/charm/fields.py
--- a/yt/frontends/charm/fields.py
+++ /dev/null
@@ -1,150 +0,0 @@
-"""
-Charm-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    NullFunc, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-import numpy as np
-
-CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = CharmFieldInfo.add_field
-
-KnownCharmFields = FieldInfoContainer()
-add_charm_field = KnownCharmFields.add_field
-
-add_charm_field("potential", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("potential")],
-                units=r"")
-
-add_charm_field("density", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("density")],
-                units=r"")
-
-add_charm_field("gravitational_field_x", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_x")],
-                units=r"")
-
-add_charm_field("gravitational_field_y", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_y")],
-                units=r"")
-
-add_charm_field("gravitational_field_z", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_z")],
-                units=r"")
-
-def _Density(field, data):
-    return data["density"]
-add_field("Density",function=_Density, take_log=True,
-          units=r'\rm{g}/\rm{cm^3}')
-
-def particle_func(p_field, dtype='float64'):
-    def _Particles(field, data):
-        io = data.hierarchy.io
-        if not data.NumberOfParticles > 0:
-            return np.array([], dtype=dtype)
-        else:
-            return io._read_particles(data, p_field).astype(dtype)
-        
-    return _Particles
-
-_particle_field_list = ["mass",
-                        "position_x",
-                        "position_y",
-                        "position_z",
-                        "velocity_x",
-                        "velocity_y",
-                        "velocity_z",
-                        "acceleration_x",
-                        "acceleration_y",
-                        "acceleration_z"]
-
-for pf in _particle_field_list:
-    pfunc = particle_func("%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
-
-def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64')
-    return particles
-
-def _ParticleMassMsun(field, data):
-    particles = data["particle_mass"].astype('float64')
-    return particles/1.989e33
-
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True)
-add_field("ParticleMassMsun",
-          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
-          particle_type=True)
-
-#do overrides for 2D
-
-Charm2DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
-add_charm_2d_field = Charm2DFieldInfo.add_field
-
-def _gravitational_field_z(field, data):
-    return np.zeros(data['gravitational_field_x'].shape,
-                    dtype='float64')
-add_charm_2d_field("gravitational_field_z", function=_gravitational_field_z)
-
-def _particle_position_z(field, data):
-    return np.zeros(data['particle_position_x'].shape, dtype='float64')
-add_charm_2d_field("particle_position_z", function=_particle_position_z)
-
-def _particle_velocity_z(field, data):
-    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
-add_charm_2d_field("particle_velocity_z", function=_particle_velocity_z)
-
-def _particle_acceleration_z(field, data):
-    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
-add_charm_2d_field("particle_acceleration_z", function=_particle_acceleration_z)
-
-#do overrides for 1D
-
-Charm1DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
-add_charm_1d_field = Charm1DFieldInfo.add_field
-
-def _gravitational_field_y(field, data):
-    return np.zeros(data['gravitational_field_x'].shape,
-                    dtype='float64')
-
-def _particle_position_y(field, data):
-    return np.zeros(data['particle_position_x'].shape, dtype='float64')
-
-def _particle_velocity_y(field, data):
-    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
-
-def _particle_acceleration_y(field, data):
-    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
-
-add_charm_1d_field("gravitational_field_z", function=_gravitational_field_z)
-add_charm_1d_field("gravitational_field_y", function=_gravitational_field_y)
-
-add_charm_1d_field("particle_position_z", function=_particle_position_z)
-add_charm_1d_field("particle_velocity_z", function=_particle_velocity_z)
-add_charm_1d_field("particle_acceleration_z", function=_particle_acceleration_z)
-
-add_charm_1d_field("particle_position_y", function=_particle_position_y)
-add_charm_1d_field("particle_velocity_y", function=_particle_velocity_y)
-add_charm_1d_field("particle_acceleration_y", function=_particle_acceleration_y)
\ No newline at end of file

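The particle_func factory in the removed fields.py is a standard closure
pattern: it bakes the particle field name into a freshly created field
function, so one loop can register all ten particle fields. A
stripped-down analogue (the plain dict stands in for a yt data object and
is not the yt API):

    import numpy as np

    def particle_func(p_field, dtype='float64'):
        def _Particles(data):
            if data["NumberOfParticles"] <= 0:
                return np.array([], dtype=dtype)
            return np.asarray(data[p_field], dtype=dtype)
        return _Particles

    data = {"NumberOfParticles": 2, "mass": [1.0, 2.5]}
    read_mass = particle_func("mass")
    print(read_mass(data))   # [1.0, 2.5] as float64
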
diff -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b -r 3feaff518124093579c9c131992d1619d0278e9d yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""
-The data-file handling functions
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import h5py
-import os
-import re
-import numpy as np
-
-from yt.utilities.io_handler import \
-           BaseIOHandler
-
-class IOHandlerCharmHDF5(BaseIOHandler):
-    _data_style = "charm_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'position_y': 1,
-                                      'position_z': 2,
-                                      'velocity_x': 3,
-                                      'velocity_y': 4,
-                                      'velocity_z': 5,
-                                      'acceleration_x': 6,
-                                      'acceleration_y': 7,
-                                      'acceleration_z': 8,
-                                      'mass': 9}
-
-    _field_dict = None
-    @property
-    def field_dict(self):
-        if self._field_dict is not None:
-            return self._field_dict
-        field_dict = {}
-        for key, val in self._handle.attrs.items():
-            if key.startswith('component_'):
-                comp_number = int(re.match(r'component_(\d+)', key).groups()[0])
-                field_dict[val] = comp_number
-        self._field_dict = field_dict
-        return self._field_dict
-        
-    def _read_field_names(self, grid):
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        fns = [c[1] for c in self._handle['/'].attrs.items()[-ncomp-1:-1]]
-        return fns
-    def _read_data(self,grid,field):
-
-        lstring = 'level_%i' % grid.Level
-        lev = self._handle[lstring]
-        dims = grid.ActiveDimensions
-        boxsize = dims.prod()
-        
-        grid_offset = lev[self._offset_string][grid._level_id]
-        start = grid_offset+self.field_dict[field]*boxsize
-        stop = start + boxsize
-        data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
-
-    def _read_particles(self, grid, name):
-
-        field_index = self._particle_field_index[name]
-        lev = 'level_%s' % grid.Level
-
-        particles_per_grid = self._handle[lev]['particles:offsets'].value
-        items_per_particle = len(self._particle_field_index)
-
-        # compute global offset position
-        offsets = items_per_particle * np.cumsum(particles_per_grid)
-        offsets = np.append(np.array([0]), offsets)
-        offsets = np.array(offsets, dtype=np.int64)
-
-        # convert between the global grid id and the id on this level            
-        grid_levels = np.array([g.Level for g in self.pf.h.grids])
-        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
-        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
-        lo = grid.id - grid_level_offset
-        hi = lo + 1
-
-        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-        return data[field_index::items_per_particle]
-
-class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
-    _data_style = "charm2d_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'position_y': 1,
-                                      'velocity_x': 2,
-                                      'velocity_y': 3,
-                                      'acceleration_x': 4,
-                                      'acceleration_y': 5,
-                                      'mass': 6}
-
-
-class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
-    _data_style = "charm1d_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'velocity_x': 1,
-                                      'acceleration_x': 2,
-                                      'mass': 3}
\ No newline at end of file

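The grid-field read in _read_data is the flat-storage counterpart of the
particle read: each level stores every component of every grid in one 1D
dataset, so a field lives at grid_offset + component * boxsize and is
reshaped in Fortran order. A synthetic sketch (names illustrative):

    import numpy as np

    dims = np.array([4, 2, 1])            # ActiveDimensions of one grid
    boxsize = dims.prod()                 # 8 cells
    level_data = np.arange(2 * boxsize, dtype='float64')   # 2 components
    grid_offset = 0                       # first grid on this level
    field_component = 1                   # e.g. field_dict['potential']

    start = grid_offset + field_component * boxsize
    stop = start + boxsize
    field = level_data[start:stop].reshape(dims, order='F')
    # field[i, j, k] varies fastest in i, matching the on-disk layout.
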
diff -r b218d1ab004ce25314c90cd5469e0cbe68f9f85b -r 3feaff518124093579c9c131992d1619d0278e9d yt/frontends/charm/setup.py
--- a/yt/frontends/charm/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('charm', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config


https://bitbucket.org/yt_analysis/yt/commits/1009bad03bea/
Changeset:   1009bad03bea
Branch:      yt
User:        atmyers
Date:        2014-08-10 20:21:41
Summary:     fixing Chombo IO issue in the yt branch
Affected #:  1 file

diff -r 3feaff518124093579c9c131992d1619d0278e9d -r 1009bad03bea2184f5d8d35a118f89265b1e0bfe yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -83,16 +83,8 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             grid = chunks[0].objs[0]
-            lstring = 'level_%i' % grid.Level
-            lev = self._handle[lstring]
-            grid_offset = lev[self._offset_string][grid._level_id]
-            boxsize = grid.ActiveDimensions.prod()
             for ftype, fname in fields:
-                start = grid_offset+self.field_dict[fname]*boxsize
-                stop = start + boxsize
-                data = lev[self._data_string][start:stop]
-                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
-                                        order='F')
+                rv[ftype, fname] = self._read_data(grid, fname)
             return rv
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
@@ -108,16 +100,10 @@
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
-                lstring = 'level_%i' % g.Level
-                lev = self._handle[lstring]
-                grid_offset = lev[self._offset_string][g._level_id]
-                boxsize = g.ActiveDimensions.prod()
                 nd = 0
                 for field in fields:
-                    start = grid_offset+self.field_dict[fname]*boxsize
-                    stop = start + boxsize
-                    data = lev[self._data_string][start:stop]
-                    data = data.reshape(g.ActiveDimensions, order='F')
+                    ftype, fname = field
+                    data = self._read_data(g, fname)
                     nd = g.select(selector, data, rv[field], ind) # caches
                 ind += nd
         return rv

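The removed lines show the bug this commit fixes: in the multi-chunk path
the loop ran 'for field in fields:' but indexed self.field_dict[fname]
without ever unpacking field, so fname was stale (or unbound) and every
field could read the same component. A minimal reproduction of the
pattern, with invented names:

    field_dict = {"density": 0, "potential": 1}
    fields = [("chombo", "density"), ("chombo", "potential")]

    # Buggy shape: fname is never rebound inside the loop.
    fname = "density"                # leftover binding from earlier code
    for field in fields:
        print("%s -> %d" % (fname, field_dict[fname]))   # 'density -> 0' twice

    # Fixed shape: unpack the (ftype, fname) tuple each iteration.
    for field in fields:
        ftype, fname = field
        print("%s -> %d" % (fname, field_dict[fname]))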

https://bitbucket.org/yt_analysis/yt/commits/d5b141809361/
Changeset:   d5b141809361
Branch:      yt
User:        ngoldbaum
Date:        2014-08-11 20:09:36
Summary:     Merged in atmyers/yt-fixes (pull request #1139)

A patch that fixes an issue uncovered by Antoine Strugarek on the mailing list
Affected #:  4 files

diff -r 668704ca4a85a154d2113fc7978eda970ba98eab -r d5b14180936148e2400ac1c495f429b8d58ba62b yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -83,16 +83,8 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             grid = chunks[0].objs[0]
-            lstring = 'level_%i' % grid.Level
-            lev = self._handle[lstring]
-            grid_offset = lev[self._offset_string][grid._level_id]
-            boxsize = grid.ActiveDimensions.prod()
             for ftype, fname in fields:
-                start = grid_offset+self.field_dict[fname]*boxsize
-                stop = start + boxsize
-                data = lev[self._data_string][start:stop]
-                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
-                                        order='F')
+                rv[ftype, fname] = self._read_data(grid, fname)
             return rv
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
@@ -108,16 +100,10 @@
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
-                lstring = 'level_%i' % g.Level
-                lev = self._handle[lstring]
-                grid_offset = lev[self._offset_string][g._level_id]
-                boxsize = g.ActiveDimensions.prod()
                 nd = 0
                 for field in fields:
-                    start = grid_offset+self.field_dict[fname]*boxsize
-                    stop = start + boxsize
-                    data = lev[self._data_string][start:stop]
-                    data = data.reshape(g.ActiveDimensions, order='F')
+                    ftype, fname = field
+                    data = self._read_data(g, fname)
                     nd = g.select(selector, data, rv[field], ind) # caches
                 ind += nd
         return rv

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
