[yt-svn] commit/yt: 26 new changesets

commits-noreply at bitbucket.org
Sun May 4 07:57:01 PDT 2014


26 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/933192a4e977/
Changeset:   933192a4e977
Branch:      yt-3.0
User:        atmyers
Date:        2014-03-31 21:51:19
Summary:     first pass at refactoring chombo frontend
Affected #:  3 files

diff -r 19470c49dd8c1c5c66ee7a8bd638940761b5d3e9 -r 933192a4e977b22fd066e390549fe3230050a49a yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -16,11 +16,12 @@
 from .data_structures import \
       ChomboGrid, \
       ChomboHierarchy, \
-      ChomboDataset
+      ChomboDataset, \
+      Orion2Hierarchy, \
+      Orion2Dataset
 
 from .fields import \
       ChomboFieldInfo
-add_chombo_field = ChomboFieldInfo.add_field
 
 from .io import \
       IOHandlerChomboHDF5

diff -r 19470c49dd8c1c5c66ee7a8bd638940761b5d3e9 -r 933192a4e977b22fd066e390549fe3230050a49a yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -90,11 +90,18 @@
 class ChomboHierarchy(GridIndex):
 
     grid = ChomboGrid
+    _data_file = None
 
     def __init__(self,pf,dataset_type='chombo_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
         self.dataset_type = dataset_type
+
+        if pf.dimensionality == 1:
+            self.dataset_type = "chombo1d_hdf5"
+        if pf.dimensionality == 2:
+            self.dataset_type = "chombo2d_hdf5"        
+
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the index file is the parameter file!
@@ -103,38 +110,26 @@
         self.directory = pf.fullpath
         self._handle = pf._handle
 
-        self.float_type = self._handle['/level_0']['data:datatype=0'].dtype.name
-        self._levels = self._handle.keys()[1:]
+        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
         GridIndex.__init__(self,pf,dataset_type)
         self._read_particles()
 
     def _read_particles(self):
-        self.particle_filename = self.index_filename[:-4] + 'sink'
-        if not os.path.exists(self.particle_filename): return
-        with open(self.particle_filename, 'r') as f:
-            lines = f.readlines()
-            self.num_stars = int(lines[0].strip().split(' ')[0])
-            for line in lines[1:]:
-                particle_position_x = float(line.split(' ')[1])
-                particle_position_y = float(line.split(' ')[2])
-                particle_position_z = float(line.split(' ')[3])
-                coord = [particle_position_x, particle_position_y, particle_position_z]
-                # for each particle, determine which grids contain it
-                # copied from object_finding_mixin.py
-                mask=np.ones(self.num_grids)
-                for i in xrange(len(coord)):
-                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = np.where(mask == 1)
-                selected_grids = self.grids[ind]
-                # in orion, particles always live on the finest level.
-                # so, we want to assign the particle to the finest of
-                # the grids we just found
-                if len(selected_grids) != 0:
-                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = np.where(self.grids == grid)[0][0]
-                    self.grid_particle_count[ind] += 1
-                    self.grids[ind].NumberOfParticles += 1
+        
+        self.num_particles = 0
+        particles_per_grid = []
+        for key, val in self._handle.items():
+            if key.startswith('level'):
+                level_particles = val['particles:offsets'][:]
+                self.num_particles += level_particles.sum()
+                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
+
+        for i, grid in enumerate(self.grids):
+            self.grids[i].NumberOfParticles = particles_per_grid[i]
+            self.grid_particle_count[i] = particles_per_grid[i]
+
+        assert(self.num_particles == self.grid_particle_count.sum())
 
     def _detect_output_fields(self):
         ncomp = int(self._handle['/'].attrs['num_components'])
@@ -148,32 +143,49 @@
     def _parse_index(self):
         f = self._handle # shortcut
 
-        # this relies on the first Group in the H5 file being
-        # 'Chombo_global'
-        levels = f.keys()[1:]
         grids = []
         self.dds_list = []
         i = 0
-        for lev in levels:
+        D = self.parameter_file.dimensionality
+        for lev_index, lev in enumerate(self._levels):
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            boxes = f[lev]['boxes'].value
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
             dx = f[lev].attrs['dx']
             self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
             for level_id, box in enumerate(boxes):
-                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
                 pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
                 grids.append(pg)
                 grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type)
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1)
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
         self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-#        self.grids = np.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         self._reconstruct_parent_child()
@@ -236,6 +248,115 @@
         return f
 
     def _parse_parameter_file(self):
+        
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_left_edge = self._calc_left_edge()
+        self.domain_right_edge = self._calc_right_edge()
+        self.domain_dimensions = self._calc_domain_dimensions()
+
+        if self.dimensionality == 1:
+            self._fieldinfo_fallback = Chombo1DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self._fieldinfo_fallback = Chombo2DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+        
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self.periodicity = (True,) * self.dimensionality
+
+    def _calc_left_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        return LE
+
+    def _calc_right_edge(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        dx0 = fileh['/level_0'].attrs['dx']
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        fileh.close()
+        return RE
+
+    def _calc_domain_dimensions(self):
+        fileh = self._handle
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        return R_index - L_index
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not (os.path.isfile('pluto.ini') and os.path.isfile('orion2.ini')):
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Chombo_global" in fileh["/"]
+                valid = not ('CeilVA_mass' in fileh.attrs.keys())
+                fileh.close()
+                return valid
+            except:
+                pass
+        return False
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+
+class Orion2Hierarchy(ChomboHierarchy):
+
+    def __init__(self, pf, dataset_type="orion_chombo_native"):
+        ChomboHierarchy.__init__(self, pf, dataset_type)
+
+    def _read_particles(self):
+        self.particle_filename = self.index_filename[:-4] + 'sink'
+        if not os.path.exists(self.particle_filename): return
+        with open(self.particle_filename, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip().split(' ')[0])
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=np.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = np.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
+
+class Orion2Dataset(ChomboDataset):
+
+    _index_class = Orion2Hierarchy
+
+    def __init__(self, filename, dataset_type='orion_chombo_native',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+            storage_filename, ini_filename)
+
+    def _parse_parameter_file(self):
         """
         Check to see whether an 'orion2.ini' file
         exists in the plot file directory. If one does, attempt to parse it.
@@ -245,9 +366,9 @@
         if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
-        self.domain_left_edge = self.__calc_left_edge()
-        self.domain_right_edge = self.__calc_right_edge()
-        self.domain_dimensions = self.__calc_domain_dimensions()
+        self.domain_left_edge = self._calc_left_edge()
+        self.domain_right_edge = self._calc_right_edge()
+        self.domain_dimensions = self._calc_domain_dimensions()
         self.dimensionality = 3
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self.periodicity = (True, True, True)
@@ -279,43 +400,20 @@
                     else:
                         self.parameters[paramName] = t
 
-    def __calc_left_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        return LE
-
-    def __calc_right_edge(self):
-        fileh = h5py.File(self.parameter_filename,'r')
-        dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
-        fileh.close()
-        return RE
-
-    def __calc_domain_dimensions(self):
-        fileh = self._handle
-        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
-        return R_index - L_index
-
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        if not os.path.isfile('pluto.ini'):
+        
+        if (os.path.isfile('orion2.ini')):
+            return True
+
+        if not (os.path.isfile('pluto.ini')):
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
+                valid = 'CeilVA_mass' in fileh.attrs.keys()
                 fileh.close()
                 return valid
             except:
                 pass
         return False
 
-    @parallel_root_only
-    def print_key_parameters(self):
-        for a in ["current_time", "domain_dimensions", "domain_left_edge",
-                  "domain_right_edge"]:
-            if not hasattr(self, a):
-                mylog.error("Missing %s in parameter file definition!", a)
-                continue
-            v = getattr(self, a)
-            mylog.info("Parameters: %-25s = %s", a, v)
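
For reference, the np.choose sequence in Orion2Hierarchy._read_particles above amounts to a containment test: a grid keeps the particle only if, on every axis, its left edge is at or below the particle coordinate and its right edge is above it. A simplified, equivalent sketch (helper name hypothetical):

    import numpy as np

    def grids_containing(coord, grid_left_edge, grid_right_edge):
        # True for every grid whose bounding box contains the coordinate.
        mask = np.ones(grid_left_edge.shape[0], dtype=bool)
        for i in range(3):
            mask &= grid_left_edge[:, i] <= coord[i]
            mask &= grid_right_edge[:, i] > coord[i]
        return np.where(mask)[0]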

diff -r 19470c49dd8c1c5c66ee7a8bd638940761b5d3e9 -r 933192a4e977b22fd066e390549fe3230050a49a yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -27,8 +27,19 @@
     _data_string = 'data:datatype=0'
 
     def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, pf)
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
         self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'position_z': 2,
+                                      'velocity_x': 3,
+                                      'velocity_y': 4,
+                                      'velocity_z': 5,
+                                      'acceleration_x': 6,
+                                      'acceleration_y': 7,
+                                      'acceleration_z': 8,
+                                      'mass': 9}
 
     _field_dict = None
     @property
@@ -44,7 +55,6 @@
         
     def _read_field_names(self,grid):
         ncomp = int(self._handle['/'].attrs['num_components'])
-
         fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
@@ -107,6 +117,64 @@
                 ind += nd
         return rv
 
+    def _read_particles(self, grid, name):
+
+        field_index = self._particle_field_index[name]
+        lev = 'level_%s' % grid.Level
+
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
+        items_per_particle = len(self._particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+
+        # convert between the global grid id and the id on this level            
+        grid_levels = np.array([g.Level for g in self.pf.h.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
+        return data[field_index::items_per_particle]
+
+class IOHandlerChombo2DHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "chombo2d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'velocity_x': 2,
+                                      'velocity_y': 3,
+                                      'acceleration_x': 4,
+                                      'acceleration_y': 5,
+                                      'mass': 6}
+
+
+class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "chombo1d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'velocity_x': 1,
+                                      'acceleration_x': 2,
+                                      'mass': 3}    
+
+class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
+    _dataset_type = "orion_chombo_native"
+
     def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files
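
The refactored ChomboHierarchy._read_particles above reads particle counts directly from the HDF5 file instead of a sidecar text file. The layout it assumes can be shown in a standalone sketch (the file name and the field count per particle are placeholders):

    import h5py
    import numpy as np

    with h5py.File("plt0000.3d.hdf5", "r") as f:  # placeholder file name
        level = f["level_0"]
        # number of particles stored in each grid on this level
        particles_per_grid = level["particles:offsets"][:]
        items_per_particle = 10  # e.g. 3 positions + 3 velocities + 3 accelerations + mass
        # start of each grid's block in the flat 'particles:data' array
        starts = np.append([0], items_per_particle * np.cumsum(particles_per_grid))
        starts = starts.astype(np.int64)
        # all fields of grid 0's particles, interleaved per particle;
        # field k of the block is data[k::items_per_particle]
        data = level["particles:data"][starts[0]:starts[1]]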


https://bitbucket.org/yt_analysis/yt/commits/0ea507ec33a3/
Changeset:   0ea507ec33a3
Branch:      yt-3.0
User:        atmyers
Date:        2014-03-31 22:08:41
Summary:     merging
Affected #:  2 files

diff -r 933192a4e977b22fd066e390549fe3230050a49a -r 0ea507ec33a3bf790b3d5d55f118df908cb7cd80 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -119,6 +119,9 @@
     ImageArray, particle_filter, create_profile, \
     Profile1D, Profile2D, Profile3D
 
+# For backwards compatibility
+TimeSeriesData = deprecated_class(DatasetSeries)
+
 from yt.frontends.api import _frontend_container
 frontends = _frontend_container()
 

diff -r 933192a4e977b22fd066e390549fe3230050a49a -r 0ea507ec33a3bf790b3d5d55f118df908cb7cd80 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -759,6 +759,7 @@
         self.field_data = YTFieldData()
         self.weight_field = weight_field
         self.field_units = {}
+        ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
 
     def add_fields(self, fields):
         fields = ensure_list(fields)
@@ -792,7 +793,9 @@
     def _finalize_storage(self, fields, temp_storage):
         # We use our main comm here
         # This also will fill _field_data
-        # FIXME: Add parallelism and combining std stuff
+        temp_storage.values = self.comm.mpi_allreduce(temp_storage.values, op="sum", dtype="float64")
+        temp_storage.weight_values = self.comm.mpi_allreduce(temp_storage.weight_values, op="sum", dtype="float64")
+        temp_storage.used = self.comm.mpi_allreduce(temp_storage.used, op="sum", dtype="bool")
         blank = ~temp_storage.used
         self.used = temp_storage.used
         if self.weight_field is not None:
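
The _finalize_storage change replaces the FIXME with a real reduction: each processor's partial profile is summed over the communicator before normalization. A minimal standalone sketch of the same operation using mpi4py directly (yt goes through its own comm wrapper, so this is illustrative only):

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    # stand-in for temp_storage.values on this rank
    local_values = np.zeros(8, dtype="float64")
    local_values[comm.rank % 8] = 1.0
    total = np.empty_like(local_values)
    comm.Allreduce(local_values, total, op=MPI.SUM)
    # 'total' is now identical on every rank: the sum of all partial bins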


https://bitbucket.org/yt_analysis/yt/commits/6d6735a7838e/
Changeset:   6d6735a7838e
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-05 01:28:59
Summary:     some changes needed to read in generic chombo data. Now works in 3D, but not in lower dimensions
Affected #:  3 files

diff -r 0ea507ec33a3bf790b3d5d55f118df908cb7cd80 -r 6d6735a7838e59d7521aab866ec62bee33e27711 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -113,7 +113,7 @@
         self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
         self._levels = [key for key in self._handle.keys() if key.startswith('level')]
         GridIndex.__init__(self,pf,dataset_type)
-        self._read_particles()
+        #self._read_particles()
 
     def _read_particles(self):
         
@@ -133,7 +133,11 @@
 
     def _detect_output_fields(self):
         ncomp = int(self._handle['/'].attrs['num_components'])
-        self.field_list = [("chombo", c[1]) for c in self._handle['/'].attrs.items()[-ncomp:]]
+        output_fields = []
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith("component"):
+                output_fields.append(val)
+        self.field_list = [("chombo", c) for c in output_fields]
           
     def _count_grids(self):
         self.num_grids = 0
@@ -220,11 +224,19 @@
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
         self.fluid_types += ("chombo",)
-        self._handle = h5py.File(filename,'r')
-        self.current_time = self._handle.attrs['time']
+        self._handle = h5py.File(filename, 'r')
+        D = self._handle['Chombo_global/'].attrs['SpaceDim']
+        if D == 1:
+            self.dataset_type = 'chombo1d_hdf5'
+        if D == 2:
+            self.dataset_type = 'chombo2d_hdf5'
+        try:
+            self.current_time = self._handle.attrs['time']
+        except KeyError:
+            self.current_time = 0.0
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
-        Dataset.__init__(self,filename,dataset_type)
+        Dataset.__init__(self,filename, self.dataset_type)
         self.storage_filename = storage_filename
         self.cosmological_simulation = False
 
@@ -257,13 +269,13 @@
         self.domain_dimensions = self._calc_domain_dimensions()
 
         if self.dimensionality == 1:
-            self._fieldinfo_fallback = Chombo1DFieldInfo
+#            self._fieldinfo_fallback = Chombo1DFieldInfo
             self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
 
         if self.dimensionality == 2:
-            self._fieldinfo_fallback = Chombo2DFieldInfo
+#            self._fieldinfo_fallback = Chombo2DFieldInfo
             self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
@@ -274,20 +286,23 @@
     def _calc_left_edge(self):
         fileh = self._handle
         dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        D = self.dimensionality
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
         return LE
 
     def _calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        D = self.dimensionality
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
         fileh.close()
         return RE
 
     def _calc_domain_dimensions(self):
         fileh = self._handle
-        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        D = self.dimensionality
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
         return R_index - L_index
 
     @classmethod
@@ -366,10 +381,10 @@
         if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = 3
         self.domain_left_edge = self._calc_left_edge()
         self.domain_right_edge = self._calc_right_edge()
         self.domain_dimensions = self._calc_domain_dimensions()
-        self.dimensionality = 3
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self.periodicity = (True, True, True)
 

diff -r 0ea507ec33a3bf790b3d5d55f118df908cb7cd80 -r 6d6735a7838e59d7521aab866ec62bee33e27711 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -31,53 +31,55 @@
 # We duplicate everything here from Boxlib, because we want to be able to
 # subclass it and that can be somewhat tricky.
 class ChomboFieldInfo(FieldInfoContainer):
-    known_other_fields = (
-        ("density", (rho_units, ["density"], None)),
-        ("energy-density", (eden_units, ["energy_density"], None)),
-        ("radiation-energy-density", (eden_units, ["radiation_energy_density"], None)),
-        ("X-momentum", (mom_units, ["momentum_x"], None)),
-        ("Y-momentum", (mom_units, ["momentum_y"], None)),
-        ("Z-momentum", (mom_units, ["momentum_z"], None)),
-        ("temperature", ("K", ["temperature"], None)),
-        ("X-magnfield", ("gauss", ["magnetic_field_x"], None)),
-        ("Y-magnfield", ("gauss", ["magnetic_field_y"], None)),
-        ("Z-magnfield", ("gauss", ["magnetic_field_z"], None)),
-    )
+    known_other_fields = ()
+    known_particle_fields = ()
+    # known_other_fields = (
+    #     ("density", (rho_units, ["density"], None)),
+    #     ("energy-density", (eden_units, ["energy_density"], None)),
+    #     ("radiation-energy-density", (eden_units, ["radiation_energy_density"], None)),
+    #     ("X-momentum", (mom_units, ["momentum_x"], None)),
+    #     ("Y-momentum", (mom_units, ["momentum_y"], None)),
+    #     ("Z-momentum", (mom_units, ["momentum_z"], None)),
+    #     ("temperature", ("K", ["temperature"], None)),
+    #     ("X-magnfield", ("gauss", ["magnetic_field_x"], None)),
+    #     ("Y-magnfield", ("gauss", ["magnetic_field_y"], None)),
+    #     ("Z-magnfield", ("gauss", ["magnetic_field_z"], None)),
+    # )
 
-    known_particle_fields = (
-        ("particle_mass", ("code_mass", [], None)),
-        ("particle_position_x", ("code_length", [], None)),
-        ("particle_position_y", ("code_length", [], None)),
-        ("particle_position_z", ("code_length", [], None)),
-        ("particle_momentum_x", (mom_units, [], None)),
-        ("particle_momentum_y", (mom_units, [], None)),
-        ("particle_momentum_z", (mom_units, [], None)),
-        # Note that these are *internal* agmomen
-        ("particle_angmomen_x", ("code_length**2/code_time", [], None)),
-        ("particle_angmomen_y", ("code_length**2/code_time", [], None)),
-        ("particle_angmomen_z", ("code_length**2/code_time", [], None)),
-        ("particle_mlast", ("code_mass", [], None)),
-        ("particle_r", ("code_length", [], None)),
-        ("particle_mdeut", ("code_mass", [], None)),
-        ("particle_n", ("", [], None)),
-        ("particle_mdot", ("code_mass/code_time", [], None)),
-        ("particle_burnstate", ("", [], None)),
-        ("particle_luminosity", ("", [], None)),
-        ("particle_id", ("", ["particle_index"], None)),
-    )
+    # known_particle_fields = (
+    #     ("particle_mass", ("code_mass", [], None)),
+    #     ("particle_position_x", ("code_length", [], None)),
+    #     ("particle_position_y", ("code_length", [], None)),
+    #     ("particle_position_z", ("code_length", [], None)),
+    #     ("particle_momentum_x", (mom_units, [], None)),
+    #     ("particle_momentum_y", (mom_units, [], None)),
+    #     ("particle_momentum_z", (mom_units, [], None)),
+    #     # Note that these are *internal* agmomen
+    #     ("particle_angmomen_x", ("code_length**2/code_time", [], None)),
+    #     ("particle_angmomen_y", ("code_length**2/code_time", [], None)),
+    #     ("particle_angmomen_z", ("code_length**2/code_time", [], None)),
+    #     ("particle_mlast", ("code_mass", [], None)),
+    #     ("particle_r", ("code_length", [], None)),
+    #     ("particle_mdeut", ("code_mass", [], None)),
+    #     ("particle_n", ("", [], None)),
+    #     ("particle_mdot", ("code_mass/code_time", [], None)),
+    #     ("particle_burnstate", ("", [], None)),
+    #     ("particle_luminosity", ("", [], None)),
+    #     ("particle_id", ("", ["particle_index"], None)),
+    # )
 
-    def setup_fluid_fields(self):
-        def _get_vel(axis):
-            def velocity(field, data):
-                return data["%smom" % ax]/data["density"]
-        for ax in 'xyz':
-            self.add_field("velocity_%s" % ax, function = _get_vel(ax),
-                           units = "cm/s")
-        self.add_field("thermal_energy",
-                       function = _thermal_energy,
-                       units = "erg/g")
-        self.add_field("thermal_energy_density",
-                       function = _thermal_energy_density,
-                       units = "erg/cm**3")
-        self.add_field("temperature", function=_temperature,
-                       units="K")
+    # def setup_fluid_fields(self):
+    #     def _get_vel(axis):
+    #         def velocity(field, data):
+    #             return data["%smom" % ax]/data["density"]
+    #     for ax in 'xyz':
+    #         self.add_field("velocity_%s" % ax, function = _get_vel(ax),
+    #                        units = "cm/s")
+    #     self.add_field("thermal_energy",
+    #                    function = _thermal_energy,
+    #                    units = "erg/g")
+    #     self.add_field("thermal_energy_density",
+    #                    function = _thermal_energy_density,
+    #                    units = "erg/cm**3")
+    #     self.add_field("temperature", function=_temperature,
+    #                    units="K")

diff -r 0ea507ec33a3bf790b3d5d55f118df908cb7cd80 -r 6d6735a7838e59d7521aab866ec62bee33e27711 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -46,11 +46,12 @@
     def field_dict(self):
         if self._field_dict is not None:
             return self._field_dict
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        temp =  self._handle['/'].attrs.items()[-ncomp:]
-        val, keys = zip(*temp)
-        val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
-        self._field_dict = dict(zip(keys,val))
+        field_dict = {}
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith('component_'):
+                comp_number = int(re.match('component_(\d)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._field_dict = field_dict
         return self._field_dict
         
     def _read_field_names(self,grid):
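
The prob_domain arithmetic above now generalizes to any dimensionality D: the attribute is a flat sequence (lo_1, ..., lo_D, hi_1, ..., hi_D) of cell indices, so with the level-0 cell width dx the edges and dimensions follow directly. A small sketch with made-up 2D numbers:

    import numpy as np

    D = 2
    dx0 = 1.0 / 64.0                         # placeholder level-0 cell width
    prob_domain = np.array([0, 0, 63, 31])   # placeholder (lo_i, lo_j, hi_i, hi_j)

    left_edge  = dx0 * prob_domain[0:D]                     # -> [0.0, 0.0]
    right_edge = dx0 * (prob_domain[D:] + 1)                # -> [1.0, 0.5]
    dimensions = (prob_domain[D:] + 1) - prob_domain[0:D]   # -> [64, 32]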


https://bitbucket.org/yt_analysis/yt/commits/f1e8e396ada5/
Changeset:   f1e8e396ada5
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-05 01:46:37
Summary:     a couple of changes that I forgot
Affected #:  2 files

diff -r 6d6735a7838e59d7521aab866ec62bee33e27711 -r f1e8e396ada5f157139786ff39119fe842429e60 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -230,6 +230,8 @@
             self.dataset_type = 'chombo1d_hdf5'
         if D == 2:
             self.dataset_type = 'chombo2d_hdf5'
+        if D == 3:
+            self.dataset_type = 'chombo_hdf5'
         try:
             self.current_time = self._handle.attrs['time']
         except KeyError:

diff -r 6d6735a7838e59d7521aab866ec62bee33e27711 -r f1e8e396ada5f157139786ff39119fe842429e60 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -31,55 +31,53 @@
 # We duplicate everything here from Boxlib, because we want to be able to
 # subclass it and that can be somewhat tricky.
 class ChomboFieldInfo(FieldInfoContainer):
-    known_other_fields = ()
-    known_particle_fields = ()
-    # known_other_fields = (
-    #     ("density", (rho_units, ["density"], None)),
-    #     ("energy-density", (eden_units, ["energy_density"], None)),
-    #     ("radiation-energy-density", (eden_units, ["radiation_energy_density"], None)),
-    #     ("X-momentum", (mom_units, ["momentum_x"], None)),
-    #     ("Y-momentum", (mom_units, ["momentum_y"], None)),
-    #     ("Z-momentum", (mom_units, ["momentum_z"], None)),
-    #     ("temperature", ("K", ["temperature"], None)),
-    #     ("X-magnfield", ("gauss", ["magnetic_field_x"], None)),
-    #     ("Y-magnfield", ("gauss", ["magnetic_field_y"], None)),
-    #     ("Z-magnfield", ("gauss", ["magnetic_field_z"], None)),
-    # )
+    known_other_fields = (
+        ("density", (rho_units, ["density"], None)),
+        ("energy-density", (eden_units, ["energy_density"], None)),
+        ("radiation-energy-density", (eden_units, ["radiation_energy_density"], None)),
+        ("X-momentum", (mom_units, ["momentum_x"], None)),
+        ("Y-momentum", (mom_units, ["momentum_y"], None)),
+        ("Z-momentum", (mom_units, ["momentum_z"], None)),
+        ("temperature", ("K", ["temperature"], None)),
+        ("X-magnfield", ("gauss", ["magnetic_field_x"], None)),
+        ("Y-magnfield", ("gauss", ["magnetic_field_y"], None)),
+        ("Z-magnfield", ("gauss", ["magnetic_field_z"], None)),
+    )
 
-    # known_particle_fields = (
-    #     ("particle_mass", ("code_mass", [], None)),
-    #     ("particle_position_x", ("code_length", [], None)),
-    #     ("particle_position_y", ("code_length", [], None)),
-    #     ("particle_position_z", ("code_length", [], None)),
-    #     ("particle_momentum_x", (mom_units, [], None)),
-    #     ("particle_momentum_y", (mom_units, [], None)),
-    #     ("particle_momentum_z", (mom_units, [], None)),
-    #     # Note that these are *internal* agmomen
-    #     ("particle_angmomen_x", ("code_length**2/code_time", [], None)),
-    #     ("particle_angmomen_y", ("code_length**2/code_time", [], None)),
-    #     ("particle_angmomen_z", ("code_length**2/code_time", [], None)),
-    #     ("particle_mlast", ("code_mass", [], None)),
-    #     ("particle_r", ("code_length", [], None)),
-    #     ("particle_mdeut", ("code_mass", [], None)),
-    #     ("particle_n", ("", [], None)),
-    #     ("particle_mdot", ("code_mass/code_time", [], None)),
-    #     ("particle_burnstate", ("", [], None)),
-    #     ("particle_luminosity", ("", [], None)),
-    #     ("particle_id", ("", ["particle_index"], None)),
-    # )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+        ("particle_momentum_x", (mom_units, [], None)),
+        ("particle_momentum_y", (mom_units, [], None)),
+        ("particle_momentum_z", (mom_units, [], None)),
+        # Note that these are *internal* agmomen
+        ("particle_angmomen_x", ("code_length**2/code_time", [], None)),
+        ("particle_angmomen_y", ("code_length**2/code_time", [], None)),
+        ("particle_angmomen_z", ("code_length**2/code_time", [], None)),
+        ("particle_mlast", ("code_mass", [], None)),
+        ("particle_r", ("code_length", [], None)),
+        ("particle_mdeut", ("code_mass", [], None)),
+        ("particle_n", ("", [], None)),
+        ("particle_mdot", ("code_mass/code_time", [], None)),
+        ("particle_burnstate", ("", [], None)),
+        ("particle_luminosity", ("", [], None)),
+        ("particle_id", ("", ["particle_index"], None)),
+    )
 
-    # def setup_fluid_fields(self):
-    #     def _get_vel(axis):
-    #         def velocity(field, data):
-    #             return data["%smom" % ax]/data["density"]
-    #     for ax in 'xyz':
-    #         self.add_field("velocity_%s" % ax, function = _get_vel(ax),
-    #                        units = "cm/s")
-    #     self.add_field("thermal_energy",
-    #                    function = _thermal_energy,
-    #                    units = "erg/g")
-    #     self.add_field("thermal_energy_density",
-    #                    function = _thermal_energy_density,
-    #                    units = "erg/cm**3")
-    #     self.add_field("temperature", function=_temperature,
-    #                    units="K")
+    def setup_fluid_fields(self):
+        def _get_vel(axis):
+            def velocity(field, data):
+                return data["%smom" % ax]/data["density"]
+        for ax in 'xyz':
+            self.add_field("velocity_%s" % ax, function = _get_vel(ax),
+                           units = "cm/s")
+        self.add_field("thermal_energy",
+                       function = _thermal_energy,
+                       units = "erg/g")
+        self.add_field("thermal_energy_density",
+                       function = _thermal_energy_density,
+                       units = "erg/cm**3")
+        self.add_field("temperature", function=_temperature,
+                       units="K")


https://bitbucket.org/yt_analysis/yt/commits/fcd3e273d041/
Changeset:   fcd3e273d041
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-05 02:01:03
Summary:     fixed it for 2D as well
Affected #:  1 file

diff -r f1e8e396ada5f157139786ff39119fe842429e60 -r fcd3e273d04185a7a04bce0ac6d8aefa8a4894ec yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -283,7 +283,7 @@
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
         
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True,) * self.dimensionality
+        self.periodicity = (True, True, True)
 
     def _calc_left_edge(self):
         fileh = self._handle


https://bitbucket.org/yt_analysis/yt/commits/79191d1b16d4/
Changeset:   79191d1b16d4
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-05 02:06:21
Summary:     also set .geometry attribute
Affected #:  1 file

diff -r fcd3e273d04185a7a04bce0ac6d8aefa8a4894ec -r 79191d1b16d41cc88a507f47fbdf045e58e85074 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -236,6 +236,7 @@
             self.current_time = self._handle.attrs['time']
         except KeyError:
             self.current_time = 0.0
+        self.geometry = "cartesian"
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
         Dataset.__init__(self,filename, self.dataset_type)


https://bitbucket.org/yt_analysis/yt/commits/9127cc395067/
Changeset:   9127cc395067
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-11 02:04:34
Summary:     separate out the Orion 2 field list from Chombo, which has no known fields by itself
Affected #:  2 files

diff -r 79191d1b16d41cc88a507f47fbdf045e58e85074 -r 9127cc39506706365ef9385da9cb38242bb05293 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -51,7 +51,7 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0

diff -r 79191d1b16d41cc88a507f47fbdf045e58e85074 -r 9127cc39506706365ef9385da9cb38242bb05293 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -28,9 +28,13 @@
 mom_units = "code_mass * code_length / code_time"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
 
+class ChomboFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+    known_particle_fields = ()
+
 # We duplicate everything here from Boxlib, because we want to be able to
 # subclass it and that can be somewhat tricky.
-class ChomboFieldInfo(FieldInfoContainer):
+class Orion2FieldInfo(ChomboFieldInfo):
     known_other_fields = (
         ("density", (rho_units, ["density"], None)),
         ("energy-density", (eden_units, ["energy_density"], None)),


https://bitbucket.org/yt_analysis/yt/commits/956b960a7641/
Changeset:   956b960a7641
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-11 07:23:48
Summary:     rename chombo to orion here, as the fields and parameters really come from orion
Affected #:  2 files

diff -r 9127cc39506706365ef9385da9cb38242bb05293 -r 956b960a76416de67610f6a9b675f27630f3d54c yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -28,8 +28,7 @@
      ST_CTIME
 
 from .definitions import \
-     chombo2enzoDict, \
-     yt2chomboFieldsDict, \
+     orion2enzoDict, \
      parameterDict \
 
 from yt.funcs import *

diff -r 9127cc39506706365ef9385da9cb38242bb05293 -r 956b960a76416de67610f6a9b675f27630f3d54c yt/frontends/chombo/definitions.py
--- a/yt/frontends/chombo/definitions.py
+++ b/yt/frontends/chombo/definitions.py
@@ -45,10 +45,8 @@
                  "NumberOfParticleAttributes": int,
                                  }
 
-chombo2enzoDict = {"GAMMA": "Gamma",
+orion2enzoDict = {"GAMMA": "Gamma",
                   "Ref_ratio": "RefineBy"
                                     }
+                                    
 
-yt2chomboFieldsDict = {}
-chombo2ytFieldsDict = {}
-


https://bitbucket.org/yt_analysis/yt/commits/5e7358c30e34/
Changeset:   5e7358c30e34
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-11 07:25:26
Summary:     minor formatting change
Affected #:  1 file

diff -r 956b960a76416de67610f6a9b675f27630f3d54c -r 5e7358c30e345e4e08237edf1307f9da5b083e81 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -28,8 +28,7 @@
      ST_CTIME
 
 from .definitions import \
-     orion2enzoDict, \
-     parameterDict \
+     orion2enzoDict, parameterDict
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \


https://bitbucket.org/yt_analysis/yt/commits/f2e18fc7ae43/
Changeset:   f2e18fc7ae43
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-26 00:36:27
Summary:     some more progress on getting chombo particles to work. Currently incomplete.
Affected #:  2 files

diff -r 5e7358c30e345e4e08237edf1307f9da5b083e81 -r f2e18fc7ae43bb34fc0ebee055c2bf0a23eca3f5 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -111,9 +111,14 @@
         self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
         self._levels = [key for key in self._handle.keys() if key.startswith('level')]
         GridIndex.__init__(self,pf,dataset_type)
-        #self._read_particles()
+
+        self._read_particles()
 
     def _read_particles(self):
+
+        # only do anything if the dataset contains particles
+        if not np.any([f[1].startswith('particle_') for f in self.field_list]):
+            return
         
         self.num_particles = 0
         particles_per_grid = []
@@ -130,12 +135,20 @@
         assert(self.num_particles == self.grid_particle_count.sum())
 
     def _detect_output_fields(self):
-        ncomp = int(self._handle['/'].attrs['num_components'])
+
+        # look for fluid fields
         output_fields = []
         for key, val in self._handle['/'].attrs.items():
             if key.startswith("component"):
                 output_fields.append(val)
         self.field_list = [("chombo", c) for c in output_fields]
+
+        # look for particle fields
+        particle_fields = []
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith("particle"):
+                particle_fields.append(val)
+        self.field_list.extend([("io", c) for c in particle_fields])        
           
     def _count_grids(self):
         self.num_grids = 0
@@ -242,7 +255,7 @@
         self.cosmological_simulation = False
 
         # These are parameters that I very much wish to get rid of.
-        self.parameters["HydroMethod"] = 'chombo' # always PPM DE
+        self.parameters["HydroMethod"] = 'chombo'
         self.parameters["DualEnergyFormalism"] = 0 
         self.parameters["EOSType"] = -1 # default
 
@@ -270,13 +283,11 @@
         self.domain_dimensions = self._calc_domain_dimensions()
 
         if self.dimensionality == 1:
-#            self._fieldinfo_fallback = Chombo1DFieldInfo
             self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
 
         if self.dimensionality == 2:
-#            self._fieldinfo_fallback = Chombo2DFieldInfo
             self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))

diff -r 5e7358c30e345e4e08237edf1307f9da5b083e81 -r f2e18fc7ae43bb34fc0ebee055c2bf0a23eca3f5 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -30,16 +30,6 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'position_y': 1,
-                                      'position_z': 2,
-                                      'velocity_x': 3,
-                                      'velocity_y': 4,
-                                      'velocity_z': 5,
-                                      'acceleration_x': 6,
-                                      'acceleration_y': 7,
-                                      'acceleration_z': 8,
-                                      'mass': 9}
 
     _field_dict = None
     @property
@@ -53,6 +43,19 @@
                 field_dict[val] = comp_number
         self._field_dict = field_dict
         return self._field_dict
+
+    _particle_field_index = None
+    @property
+    def particle_field_index(self):
+        if self._particle_field_index is not None:
+            return self._particle_field_index
+        field_dict = {}
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith('particle_'):
+                comp_number = int(re.match('particle_component_(\d)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._particle_field_index = field_dict
+        return self._particle_field_index        
         
     def _read_field_names(self,grid):
         ncomp = int(self._handle['/'].attrs['num_components'])
@@ -118,9 +121,54 @@
                 ind += nd
         return rv
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        chunks = list(chunks)
+        #fields.sort(key=lambda a: self.field_dict[a[1]])
+        if selector.__class__.__name__ == "GridSelector":
+
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+            grid = chunks[0].objs[0]
+            lev = 'level_%i' % grid.Level
+
+            particles_per_grid = self._handle[lev]['particles:offsets'].value
+            items_per_particle = len(self.particle_field_index)
+
+            # compute global offset position
+            offsets = items_per_particle * np.cumsum(particles_per_grid)
+            offsets = np.append(np.array([0]), offsets)
+            offsets = np.array(offsets, dtype=np.int64)
+
+            # convert between the global grid id and the id on this level            
+            grid_levels = np.array([g.Level for g in self.pf.index.grids])
+            grid_ids    = np.array([g.id    for g in self.pf.index.grids])
+            grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+            lo = grid.id - grid_level_offset
+            hi = lo + 1
+
+            for ftype, fname in fields:
+                field_index = self.particle_field_index[fname]
+                data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
+                rv[ftype, fname] = data[field_index::items_per_particle]
+
+            return rv
+
+    # def _read_particle_coords(self, chunks, ptf):
+    #     chucks = list(chunks)
+    #     f = self._handle # shortcut
+    #     for chunk in chunks:
+    #         for grid in chunk.objs:
+    #             for ptype, field_list in sorted(ptf.items()):
+    #                 import pdb; pdb.set_trace()
+    #                 x = self._read_particles(grid, 'particle_position_x')
+    #                 y = self._read_particles(grid, 'particle_position_y')
+    #                 x = self._read_particles(grid, 'particle_position_z')
+    #                 yield ptype, (x, y, z)
+
     def _read_particles(self, grid, name):
 
-        field_index = self._particle_field_index[name]
+        field_index = self.particle_field_index[name]
         lev = 'level_%s' % grid.Level
 
         particles_per_grid = self._handle[lev]['particles:offsets'].value
@@ -132,8 +180,8 @@
         offsets = np.array(offsets, dtype=np.int64)
 
         # convert between the global grid id and the id on this level            
-        grid_levels = np.array([g.Level for g in self.pf.h.grids])
-        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_levels = np.array([g.Level for g in self.pf.index.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.index.grids])
         grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
         lo = grid.id - grid_level_offset
         hi = lo + 1
@@ -150,14 +198,6 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'position_y': 1,
-                                      'velocity_x': 2,
-                                      'velocity_y': 3,
-                                      'acceleration_x': 4,
-                                      'acceleration_y': 5,
-                                      'mass': 6}
-
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
@@ -167,11 +207,7 @@
     def __init__(self, pf, *args, **kwargs):
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'velocity_x': 1,
-                                      'acceleration_x': 2,
-                                      'mass': 3}    
+        self._handle = pf._handle   
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"


https://bitbucket.org/yt_analysis/yt/commits/ea0725a64798/
Changeset:   ea0725a64798
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-28 01:21:53
Summary:     reading in particle fields for Chombo works
Affected #:  1 file

diff -r f2e18fc7ae43bb34fc0ebee055c2bf0a23eca3f5 -r ea0725a6479827fa7a2b08549e21780910d350cc yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -104,6 +104,7 @@
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s grids",
                    size, [f2 for f1, f2 in fields], ng)
+
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
@@ -124,47 +125,26 @@
     def _read_particle_selection(self, chunks, selector, fields):
         rv = {}
         chunks = list(chunks)
-        #fields.sort(key=lambda a: self.field_dict[a[1]])
+
         if selector.__class__.__name__ == "GridSelector":
 
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
+                
             grid = chunks[0].objs[0]
-            lev = 'level_%i' % grid.Level
-
-            particles_per_grid = self._handle[lev]['particles:offsets'].value
-            items_per_particle = len(self.particle_field_index)
-
-            # compute global offset position
-            offsets = items_per_particle * np.cumsum(particles_per_grid)
-            offsets = np.append(np.array([0]), offsets)
-            offsets = np.array(offsets, dtype=np.int64)
-
-            # convert between the global grid id and the id on this level            
-            grid_levels = np.array([g.Level for g in self.pf.index.grids])
-            grid_ids    = np.array([g.id    for g in self.pf.index.grids])
-            grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
-            lo = grid.id - grid_level_offset
-            hi = lo + 1
 
             for ftype, fname in fields:
-                field_index = self.particle_field_index[fname]
-                data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-                rv[ftype, fname] = data[field_index::items_per_particle]
+                rv[ftype, fname] = self._read_particles(grid, fname)
 
             return rv
 
-    # def _read_particle_coords(self, chunks, ptf):
-    #     chucks = list(chunks)
-    #     f = self._handle # shortcut
-    #     for chunk in chunks:
-    #         for grid in chunk.objs:
-    #             for ptype, field_list in sorted(ptf.items()):
-    #                 import pdb; pdb.set_trace()
-    #                 x = self._read_particles(grid, 'particle_position_x')
-    #                 y = self._read_particles(grid, 'particle_position_y')
-    #                 x = self._read_particles(grid, 'particle_position_z')
-    #                 yield ptype, (x, y, z)
+        rv = {f:np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    data = self._read_particles(grid, fname)
+                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
+        return rv
 
     def _read_particles(self, grid, name):
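
After this cleanup, _read_particle_selection has two paths: a fast path when the selector is a single grid, and a general path that concatenates per-grid reads across all chunks. Its control flow, reduced to a standalone sketch (read_grid stands in for the per-grid reader above):

    import numpy as np

    def read_particle_selection(chunks, selector, fields, read_grid):
        chunks = list(chunks)
        # fast path: exactly one chunk holding exactly one grid
        if selector.__class__.__name__ == "GridSelector":
            grid = chunks[0].objs[0]
            return {(ftype, fname): read_grid(grid, fname)
                    for ftype, fname in fields}
        # general path: gather from every grid in every chunk
        rv = {(ftype, fname): np.array([]) for ftype, fname in fields}
        for chunk in chunks:
            for grid in chunk.objs:
                for ftype, fname in fields:
                    rv[ftype, fname] = np.concatenate(
                        (read_grid(grid, fname), rv[ftype, fname]))
        return rv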
 


https://bitbucket.org/yt_analysis/yt/commits/0386624559cd/
Changeset:   0386624559cd
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-28 08:02:18
Summary:     handle the case where the grid has no particles
Affected #:  2 files

diff -r ea0725a6479827fa7a2b08549e21780910d350cc -r 0386624559cd027f277cf5488ecbb2024abed7ef yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -28,10 +28,12 @@
 mom_units = "code_mass * code_length / code_time"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
 
+# Chombo does not have any known fields by itself.
 class ChomboFieldInfo(FieldInfoContainer):
     known_other_fields = ()
     known_particle_fields = ()
 
+# Orion 2 Fields
 # We duplicate everything here from Boxlib, because we want to be able to
 # subclass it and that can be somewhat tricky.
 class Orion2FieldInfo(ChomboFieldInfo):

diff -r ea0725a6479827fa7a2b08549e21780910d350cc -r 0386624559cd027f277cf5488ecbb2024abed7ef yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -130,7 +130,7 @@
 
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
-                
+
             grid = chunks[0].objs[0]
 
             for ftype, fname in fields:
@@ -166,6 +166,10 @@
         lo = grid.id - grid_level_offset
         hi = lo + 1
 
+        # handle the case where this grid has no particles
+        if (offsets[lo] == offsets[hi]):
+            return np.array([], dtype=np.float64)
+
         data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
         return data[field_index::items_per_particle]
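
For reference, stitching the offset arithmetic removed from the GridSelector
branch earlier in this series together with the guard added here gives
roughly the complete per-grid helper (a sketch reconstructed from these
diffs, not the verbatim source):

    def _read_particles(self, grid, name):
        field_index = self.particle_field_index[name]
        lev = 'level_%i' % grid.Level

        particles_per_grid = self._handle[lev]['particles:offsets'].value
        items_per_particle = len(self.particle_field_index)

        # compute global offset position
        offsets = items_per_particle * np.cumsum(particles_per_grid)
        offsets = np.append(np.array([0]), offsets)
        offsets = np.array(offsets, dtype=np.int64)

        # convert between the global grid id and the id on this level
        grid_levels = np.array([g.Level for g in self.pf.index.grids])
        grid_ids    = np.array([g.id    for g in self.pf.index.grids])
        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
        lo = grid.id - grid_level_offset
        hi = lo + 1

        # handle the case where this grid has no particles
        if offsets[lo] == offsets[hi]:
            return np.array([], dtype=np.float64)

        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
        return data[field_index::items_per_particle]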
 


https://bitbucket.org/yt_analysis/yt/commits/d9386f1810eb/
Changeset:   d9386f1810eb
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-29 00:43:51
Summary:     adding some comments
Affected #:  1 file

diff -r 0386624559cd027f277cf5488ecbb2024abed7ef -r d9386f1810ebd3fbc981e12ec56cdb5aeab0566b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -134,6 +134,9 @@
 
         assert(self.num_particles == self.grid_particle_count.sum())
 
+    # Chombo datasets, by themselves, have no "known" fields. However, 
+    # we will look for "fluid" fields by finding the string "component" in
+    # the output file, and "particle" fields by finding the string "particle".
     def _detect_output_fields(self):
 
         # look for fluid fields
@@ -236,6 +239,8 @@
                  storage_filename = None, ini_filename = None):
         self.fluid_types += ("chombo",)
         self._handle = h5py.File(filename, 'r')
+
+        # look up the dimensionality of the dataset
         D = self._handle['Chombo_global/'].attrs['SpaceDim']
         if D == 1:
             self.dataset_type = 'chombo1d_hdf5'
@@ -243,10 +248,14 @@
             self.dataset_type = 'chombo2d_hdf5'
         if D == 3:
             self.dataset_type = 'chombo_hdf5'
+
+        # some datasets will not be time-dependent, make
+        # sure we handle that here.
         try:
             self.current_time = self._handle.attrs['time']
         except KeyError:
             self.current_time = 0.0
+
         self.geometry = "cartesian"
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
@@ -282,6 +291,7 @@
         self.domain_right_edge = self._calc_right_edge()
         self.domain_dimensions = self._calc_domain_dimensions()
 
+        # if a lower-dimensional dataset, set up pseudo-3D stuff here.
         if self.dimensionality == 1:
             self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
@@ -323,6 +333,7 @@
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
+                # ORION2 simulations should always have this:
                 valid = not ('CeilVA_mass' in fileh.attrs.keys())
                 fileh.close()
                 return valid
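
The comment added above spells out the detection strategy. A minimal sketch
of what such a method might look like (a hypothetical reconstruction; only
the particle-field half appears verbatim in a later diff in this digest, and
`self._handle` is the open h5py file whose attribute values hold field names):

    def _detect_output_fields(self):
        # look for fluid fields: attribute names containing "component"
        output_fields = []
        for key, val in sorted(self._handle.attrs.items()):
            if key.startswith("component"):
                output_fields.append(val)
        self.field_list = [("chombo", c) for c in output_fields]

        # look for particle fields: attribute names starting with "particle"
        particle_fields = []
        for key, val in sorted(self._handle.attrs.items()):
            if key.startswith("particle"):
                particle_fields.append(val)
        self.field_list.extend([("io", c) for c in particle_fields])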


https://bitbucket.org/yt_analysis/yt/commits/6c327d7efc4d/
Changeset:   6c327d7efc4d
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-29 02:01:41
Summary:     fixing an issue found by Jenkins relating to the is_valid method
Affected #:  1 file

diff -r d9386f1810ebd3fbc981e12ec56cdb5aeab0566b -r 6c327d7efc4d4a427a31b1c81f300278ec71919d yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -334,7 +334,7 @@
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
                 # ORION2 simulations should always have this:
-                valid = not ('CeilVA_mass' in fileh.attrs.keys())
+                valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
                 fileh.close()
                 return valid
             except:


https://bitbucket.org/yt_analysis/yt/commits/7e36c368a755/
Changeset:   7e36c368a755
Branch:      yt-3.0
User:        atmyers
Date:        2014-04-30 00:02:18
Summary:     this is not needed, and was creating problems when asking for the field "dx"
Affected #:  1 file

diff -r 6c327d7efc4d4a427a31b1c81f300278ec71919d -r 7e36c368a75570d1e447b498bc1543c784118ee3 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -83,7 +83,7 @@
     def _setup_dx(self):
         # has already been read in and stored in index
         self.dds = self.index.dds_list[self.Level]
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        #self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(GridIndex):
 
@@ -387,6 +387,7 @@
 class Orion2Dataset(ChomboDataset):
 
     _index_class = Orion2Hierarchy
+    _field_info_class = Orion2FieldInfo
 
     def __init__(self, filename, dataset_type='orion_chombo_native',
                  storage_filename = None, ini_filename = None):


https://bitbucket.org/yt_analysis/yt/commits/f4b01f81b71d/
Changeset:   f4b01f81b71d
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-02 00:11:13
Summary:     smarter parsing of the ini filenames
Affected #:  1 file

diff -r 7e36c368a75570d1e447b498bc1543c784118ee3 -r f4b01f81b71d15abf1fa1b84239e4d800ba67abb yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -329,7 +329,18 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        if not (os.path.isfile('pluto.ini') and os.path.isfile('orion2.ini')):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+
+        if not (pluto_ini_file_exists and orion2_ini_file_exists):
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
@@ -441,11 +452,21 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
         
-        if (os.path.isfile('orion2.ini')):
+        if orion2_ini_file_exists:
             return True
 
-        if not (os.path.isfile('pluto.ini')):
+        if not pluto_ini_file_exists:
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]


https://bitbucket.org/yt_analysis/yt/commits/f034ddb87eaa/
Changeset:   f034ddb87eaa
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-02 00:29:44
Summary:     orion owns the field dict, not chombo
Affected #:  1 file

diff -r f4b01f81b71d15abf1fa1b84239e4d800ba67abb -r f034ddb87eaaaae370c853637ffcfb1db2b0b8e8 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -436,8 +436,8 @@
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
-            if chombo2enzoDict.has_key(param):
-                paramName = chombo2enzoDict[param]
+            if orion2enzoDict.has_key(param):
+                paramName = orion22enzoDict[param]
                 t = map(parameterDict[paramName], vals.split())
                 if len(t) == 1:
                     if paramName == "GAMMA":


https://bitbucket.org/yt_analysis/yt/commits/882c62a0410a/
Changeset:   882c62a0410a
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-02 00:37:43
Summary:     also fix the ini filename parsing here
Affected #:  1 file

diff -r f034ddb87eaaaae370c853637ffcfb1db2b0b8e8 -r 882c62a0410a52a788f27f267dffdec3f60ca2b3 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -412,8 +412,13 @@
         exists in the plot file directory. If one does, attempt to parse it.
         Otherwise grab the dimensions from the hdf5 file.
         """
-        
-        if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
+
+        orion2_ini_file_exists = False
+        dir_name = os.path.dirname(os.path.abspath(self.fullplotdir))
+        orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+        orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+
+        if orion2_ini_file_exists: self._parse_inputs_file('orion2.ini')
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
         self.dimensionality = 3
@@ -437,7 +442,7 @@
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
             if orion2enzoDict.has_key(param):
-                paramName = orion22enzoDict[param]
+                paramName = orion2enzoDict[param]
                 t = map(parameterDict[paramName], vals.split())
                 if len(t) == 1:
                     if paramName == "GAMMA":


https://bitbucket.org/yt_analysis/yt/commits/ca847f702e5d/
Changeset:   ca847f702e5d
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-02 00:41:35
Summary:     np.any -> any
Affected #:  1 file

diff -r 882c62a0410a52a788f27f267dffdec3f60ca2b3 -r ca847f702e5d4abc8a729b08f2571f09568f1656 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -117,7 +117,7 @@
     def _read_particles(self):
 
         # only do anything if the dataset contains particles
-        if not np.any([f[1].startswith('particle_') for f in self.field_list]):
+        if not any([f[1].startswith('particle_') for f in self.field_list]):
             return
         
         self.num_particles = 0


https://bitbucket.org/yt_analysis/yt/commits/a7594846dc71/
Changeset:   a7594846dc71
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-02 00:58:01
Summary:     actually, let's just get rid of the parameters dictionary altogether
Affected #:  2 files

diff -r ca847f702e5d4abc8a729b08f2571f09568f1656 -r a7594846dc711f61492bdf9b27085249109753b1 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -27,9 +27,6 @@
 from stat import \
      ST_CTIME
 
-from .definitions import \
-     orion2enzoDict, parameterDict
-
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
      AMRGridPatch
@@ -441,19 +438,8 @@
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
-            if orion2enzoDict.has_key(param):
-                paramName = orion2enzoDict[param]
-                t = map(parameterDict[paramName], vals.split())
-                if len(t) == 1:
-                    if paramName == "GAMMA":
-                        self.gamma = t[0]
-                    else:
-                        self.parameters[paramName] = t[0]
-                else:
-                    if paramName == "RefineBy":
-                        self.parameters[paramName] = t[0]
-                    else:
-                        self.parameters[paramName] = t
+            if param == "GAMMA":
+                self.gamma = vals
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r ca847f702e5d4abc8a729b08f2571f09568f1656 -r a7594846dc711f61492bdf9b27085249109753b1 yt/frontends/chombo/definitions.py
--- a/yt/frontends/chombo/definitions.py
+++ b/yt/frontends/chombo/definitions.py
@@ -13,40 +13,4 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                                 }
 
-orion2enzoDict = {"GAMMA": "Gamma",
-                  "Ref_ratio": "RefineBy"
-                                    }
-                                    
-


https://bitbucket.org/yt_analysis/yt/commits/894c4d482a25/
Changeset:   894c4d482a25
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-02 08:49:06
Summary:     fixing a conflict
Affected #:  1 file

diff -r a7594846dc711f61492bdf9b27085249109753b1 -r 894c4d482a2569aad3c5efebf9b5857d7c722ac2 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -79,7 +79,7 @@
 
     def _setup_dx(self):
         # has already been read in and stored in index
-        self.dds = self.index.dds_list[self.Level]
+        self.dds = self.pf.arr(self.index.dds_list[self.Level], "code_length")
         #self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(GridIndex):
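
The conflict fix wraps the cell widths in a unitful array. pf.arr (also
available as ds.arr on any loaded dataset) attaches units from the dataset's
own registry, so code units convert correctly later; a small usage sketch,
assuming ds is any loaded dataset:

    dds = ds.arr([0.5, 0.5, 0.5], "code_length")
    print dds           # stays in code units
    print dds.in_cgs()  # converted via the dataset's unit registry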


https://bitbucket.org/yt_analysis/yt/commits/78991726707f/
Changeset:   78991726707f
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-02 08:50:50
Summary:     remove this un-needed line
Affected #:  1 file

diff -r 894c4d482a2569aad3c5efebf9b5857d7c722ac2 -r 78991726707ffa00529c06fa793f1cf25686664e yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -80,7 +80,6 @@
     def _setup_dx(self):
         # has already been read in and stored in index
         self.dds = self.pf.arr(self.index.dds_list[self.Level], "code_length")
-        #self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(GridIndex):
 


https://bitbucket.org/yt_analysis/yt/commits/7e5db57e2994/
Changeset:   7e5db57e2994
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-04 03:10:19
Summary:     Chombo parent / child information was not getting filled correctly in 3.0
Affected #:  1 file

diff -r 78991726707ffa00529c06fa793f1cf25686664e -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -54,8 +54,8 @@
     def __init__(self, id, index, level, start, stop):
         AMRGridPatch.__init__(self, id, filename = index.index_filename,
                               index = index)
-        self.Parent = []
-        self.Children = []
+        self._parent_id = []
+        self._children_ids = []
         self.Level = level
         self.ActiveDimensions = stop - start + 1
 
@@ -67,7 +67,7 @@
         """
         if self.start_index != None:
             return self.start_index
-        if self.Parent == []:
+        if self.Parent is None:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
             return np.rint(start_index).astype('int64').ravel()
@@ -81,6 +81,18 @@
         # has already been read in and stored in index
         self.dds = self.pf.arr(self.index.dds_list[self.Level], "code_length")
 
+    @property
+    def Parent(self):
+        if len(self._parent_id) == 0:
+            return None
+        return [self.index.grids[pid - self._id_offset]
+                for pid in self._parent_id]
+
+    @property
+    def Children(self):
+        return [self.index.grids[cid - self._id_offset]
+                for cid in self._children_ids]
+
 class ChomboHierarchy(GridIndex):
 
     grid = ChomboGrid
@@ -148,7 +160,7 @@
             if key.startswith("particle"):
                 particle_fields.append(val)
         self.field_list.extend([("io", c) for c in particle_fields])        
-          
+
     def _count_grids(self):
         self.num_grids = 0
         for lev in self._levels:
@@ -156,6 +168,7 @@
 
     def _parse_index(self):
         f = self._handle # shortcut
+        self.max_level = f.attrs['max_level']
 
         grids = []
         self.dds_list = []
@@ -193,6 +206,7 @@
                                start = si, stop = ei)
                 grids.append(pg)
                 grids[-1]._level_id = level_id
+                self.grid_levels[i] = level_number
                 self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
                 self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
                 self.grid_particle_count[i] = 0
@@ -206,7 +220,6 @@
         for g in self.grids:
             g._prepare_grid()
             g._setup_dx()
-        self.max_level = self.grid_levels.max()
 
     def _setup_derived_fields(self):
         self.derived_field_list = []
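
The key change here is that parent/child links are now stored as integer ids
and resolved to grid objects lazily through properties, so the links can be
recorded before the index's grids array is fully built. A self-contained
sketch of the pattern (toy classes, not the yt source):

    class Grid(object):
        _id_offset = 1  # grid ids are 1-based, as in yt's grid patches

        def __init__(self, index, gid):
            self.index = index
            self.id = gid
            self._parent_id = []
            self._children_ids = []

        @property
        def Parent(self):
            # root grids have no parent; everything else resolves ids
            # against the (by now fully populated) index.grids list
            if len(self._parent_id) == 0:
                return None
            return [self.index.grids[pid - self._id_offset]
                    for pid in self._parent_id]

        @property
        def Children(self):
            return [self.index.grids[cid - self._id_offset]
                    for cid in self._children_ids]

    class Index(object):
        pass

    idx = Index()
    g1, g2 = Grid(idx, 1), Grid(idx, 2)
    idx.grids = [g1, g2]
    g2._parent_id = [1]
    assert g2.Parent[0] is g1 and g1.Parent is None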


https://bitbucket.org/yt_analysis/yt/commits/e80eecb7cbb5/
Changeset:   e80eecb7cbb5
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-04 05:20:26
Summary:     Merging with mainline
Affected #:  200 files

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,3 +5160,4 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,4 +7,9 @@
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
 prune doc/build/
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn
 recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -34,11 +34,11 @@
  * Do not import "*" from anything other than "yt.funcs".
  * Internally, only import from source files directly -- instead of:
 
-   from yt.visualization.api import PlotCollection
+   from yt.visualization.api import ProjectionPlot
 
    do:
 
-   from yt.visualization.plot_collection import PlotCollection
+   from yt.visualization.plot_window import ProjectionPlot
 
  * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -43,7 +43,7 @@
 To indicate the return type of a given object, you can reference it using this
 construction:
 
-    This function returns a :class:`PlotCollection`.
+    This function returns a :class:`ProjectionPlot`.
 
 To reference a function, you can use:
 
@@ -51,4 +51,4 @@
 
 To reference a method, you can use:
 
-    To add a projection, use :meth:`PlotCollection.add_projection`.
+    To add a projection, use :meth:`ProjectionPlot.set_width`.

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -15,8 +15,13 @@
     required_arguments = 1
     optional_arguments = 1
     option_spec = {'skip_exceptions' : directives.flag}
+    final_argument_whitespace = True
 
-    def run(self):
+    def run(self): # check if there are spaces in the notebook name
+        nb_path = self.arguments[0]
+        if ' ' in nb_path: raise ValueError(
+            "Due to issues with docutils stripping spaces from links, white "
+            "space is not allowed in notebook filenames '{0}'".format(nb_path))
         # check if raw html is supported
         if not self.state.document.settings.raw_enabled:
             raise self.warning('"%s" directive disabled.' % self.name)
@@ -24,10 +29,11 @@
         # get path to notebook
         source_dir = os.path.dirname(
             os.path.abspath(self.state.document.current_source))
-        nb_basename = os.path.basename(self.arguments[0])
+        nb_filename = self.arguments[0]
+        nb_basename = os.path.basename(nb_filename)
         rst_file = self.state_machine.document.attributes['source']
         rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.join(rst_dir, nb_basename)
+        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
 
         # Move files around.
         rel_dir = os.path.relpath(rst_dir, setup.confdir)
@@ -89,7 +95,6 @@
         return [nb_node]
 
 
-
 class notebook_node(nodes.raw):
     pass
 
@@ -109,6 +114,7 @@
     # http://imgur.com/eR9bMRH
     header = header.replace('<style', '<style scoped="scoped"')
     header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n', '')
+    header = header.replace("code,pre{", "code{")
 
     # Filter out styles that conflict with the sphinx theme.
     filter_strings = [
@@ -120,8 +126,16 @@
     ]
     filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
 
+    line_begin_strings = [
+        'pre{',
+        'p{margin'
+        ]
+
     header_lines = filter(
         lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
+    header_lines = filter(
+        lambda x: not any([x.startswith(s) for s in line_begin_strings]), header_lines)
+
     header = '\n'.join(header_lines)
 
     # concatenate raw html lines

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -0,0 +1,401 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "heading",
+     "level": 1,
+     "metadata": {},
+     "source": [
+      "Full Halo Analysis"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Creating a Catalog"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Here we put everything together to perform some realistic analysis. First we load a full simulation dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.halo_analysis.api import *\n",
+      "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+      "\n",
+      "# Load the data set with the full simulation information\n",
+      "data_pf = load(path+'Enzo_64/RD0006/RedshiftOutput0006')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we load a rockstar halos binary file. This is the output from running the rockstar halo finder on the dataset loaded above. It is also possible to require the HaloCatalog to find the halos in the full simulation dataset at runtime by specifying a `finder_method` keyword."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Load the rockstar data files\n",
+      "halos_pf = load(path+'rockstar_halos/halos_0.0.bin')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "From these two loaded datasets we create a halo catalog object. No analysis is done at this point, we are simply defining an object we can add analysis tasks to. These analysis tasks will be run in the order they are added to the halo catalog object."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Instantiate a catalog using those two paramter files\n",
+      "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
+      "                 output_dir = path+'halo_catalog')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The first analysis task we add is a filter for the most massive halos; those with masses great than $10^{14}~M_\\odot$. Note that all following analysis will only be performed on these massive halos and we will not waste computational time calculating quantities for halos we are not interested in. This is a result of adding this filter first. If we had called `add_filter` after some other `add_quantity` or `add_callback` to the halo catalog, the quantity and callback calculations would have been performed for all halos, not just those which pass the filter."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "# Filter out less massive halos\n",
+      "hc.add_filter(\"quantity_value\", \"particle_mass\", \">\", 1e14, \"Msun\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Finding Radial Profiles"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Our first analysis goal is going to be constructing radial profiles for our halos. We would like these profiles to be in terms of the virial radius. Unfortunately we have no guarantee that values of center and virial radius recorded by the halo finder are actually physical. Therefore we should recalculate these quantities ourselves using the values recorded by the halo finder as a starting point."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The first step is going to be creating a sphere object that we will create radial profiles along. This attaches a sphere data object to every halo left in the catalog."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# attach a sphere object to each halo whose radius extends to twice the radius of the halo\n",
+      "hc.add_callback(\"sphere\", factor=2.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cummulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# use the sphere to calculate radial profiles of gas density weighted by cell volume in terms of the virial radius\n",
+      "hc.add_callback(\"profile\", x_field=\"radius\",\n",
+      "                y_fields=[(\"gas\", \"overdensity\")],\n",
+      "                weight_field=\"cell_volume\", \n",
+      "                accumulation=False,\n",
+      "                storage=\"virial_quantities_profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we calculate the virial radius of halo using the sphere object. As this is a callback, not a quantity, the virial radius will not be written out with the rest of the halo properties in the final halo catalog. This also has a `profile_storage` keyword to specify where the radial profiles are stored that will allow the callback to calculate the relevant virial quantities. We supply this keyword with the same string we gave to `storage` in the last `profile` callback."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Define a virial radius for the halo.\n",
+      "hc.add_callback(\"virial_quantities\", [\"radius\"], \n",
+      "                profile_storage = \"virial_quantities_profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now that we have calculated the virial radius, we delete the profiles we used to find it."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('delete_attribute','virial_quantities_profiles')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now that we have calculated virial quantities we can add a new sphere that is aware of the virial radius we calculated above."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('sphere', radius_field='radius_200', factor = 5,\n",
+      "        field_parameters = dict(virial_radius=('quantity','radius_200')))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Using this new sphere, we calculate a gas temperature profile along the virial radius, weighted by the cell mass."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('profile', 'virial_radius', [('gas','temperature')],\n",
+      "        storage = 'virial_profiles',\n",
+      "        weight_field = 'cell_mass', \n",
+      "        accumulation=False, output_dir='profiles')\n"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "As profiles are not quantities they will not automatically be written out in the halo catalog; thus in order to be reloadable we must write them out explicitly through a callback of `save_profiles`. This makes sense because they have an extra dimension for each halo along the profile axis. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Save the profiles\n",
+      "hc.add_callback(\"save_profiles\", storage=\"virial_profiles\", output_dir=\"profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We then create the halo catalog. Remember, no analysis is done before this call to create. By adding callbacks and filters we are simply queuing up the actions we want to take that will all run now."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "hc.create()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Reloading HaloCatalogs"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally we load these profiles back in and make a pretty plot. It is not strictly necessary to reload the profiles in this notebook, but we show this process here to illustrate that this step may be performed completely separately from the rest of the script. This workflow allows you to create a single script that will allow you to perform all of the analysis that requires the full dataset. The output can then be saved in a compact form where only the necessarily halo quantities are stored. You can then download this smaller dataset to a local computer and run any further non-computationally intense analysis and design the appropriate plots."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can load a previously saved halo catalog by using the `load` command. We then create a `HaloCatalog` object from just this dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "halos_pf =  load(path+'halo_catalog/halo_catalog.0.h5')\n",
+      "\n",
+      "hc_reloaded = HaloCatalog(halos_pf=halos_pf, output_dir=path+'halo_catalog')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      " Just as profiles are saved seperately throught the `save_profiles` callback they also must be loaded separately using the `load_profiles` callback."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc_reloaded.add_callback('load_profiles',storage='virial_profiles',\n",
+      "        output_dir='profiles')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Calling `load` is the equivalent of calling `create` earlier, but defaults to to not saving new information. This means that the callback to `load_profiles` is not run until we call `load` here."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "hc_reloaded.load()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Plotting Radial Profiles"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In the future ProfilePlot will be able to properly interpret the loaded profiles of `Halo` and `HaloCatalog` objects, but this functionality is not yet implemented. In the meantime, we show a quick method of viewing a profile for a single halo."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The individual `Halo` objects contained in the `HaloCatalog` can be accessed through the `halo_list` attribute. This gives us access to the dictionary attached to each halo where we stored the radial profiles."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "halo = hc_reloaded.halo_list[0]\n",
+      "\n",
+      "radius = halo.virial_profiles['virial_radius']\n",
+      "temperature = halo.virial_profiles[u\"('gas', 'temperature')\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Here we quickly use matplotlib to create a basic plot of the radial profile of this halo. When `ProfilePlot` is properly configured to accept Halos and HaloCatalogs the full range of yt plotting tools will be accessible."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "import matplotlib.pyplot as plt\n",
+      "\n",
+      "plt.plot(radius,temperature)\n",
+      "\n",
+      "plt.semilogy()\n",
+      "plt.xlabel('$\\mathrm{R/R_{vir}}$')\n",
+      "plt.ylabel('$\\mathrm{Temperature~[K]}$')\n",
+      "\n",
+      "plt.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -1,14 +1,13 @@
 Halo Analysis
 =============
 
-Halo finding, mass functions, merger trees, and profiling.
+Using halo catalogs, understanding the different halo finding methods,
+and using the halo mass function.
 
 .. toctree::
    :maxdepth: 1
 
-   running_halofinder
+   halo_catalogs
+   halo_finding
    halo_mass_function
-   hmf_howto
-   merger_tree
-   halo_profiling
-   ellipsoid_analysis
+   halo_analysis_example

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/analyzing/analysis_modules/halo_analysis_example.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
@@ -0,0 +1,4 @@
+Using HaloCatalogs to do Analysis
+---------------------------------
+
+.. notebook:: Halo_Analysis.ipynb

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -0,0 +1,221 @@
+
+Creating Halo Catalogs
+======================
+
+In yt 3.0, operations relating to the analysis of halos (halo finding,
+merger tree creation, and individual halo analysis) are all brought 
+together into a single framework. This framework is substantially
+different from the limited framework included in yt-2.x and is only 
+backwards compatible in that output from old halo finders may be loaded.
+
+A catalog of halos can be created from any initial dataset given to halo 
+catalog through data_pf. These halos can be found using friends-of-friends,
+HOP, and Rockstar. The finder_method keyword dictates which halo finder to
+use. The available arguments are 'fof', 'hop', and'rockstar'. For more
+details on the relative differences between these halo finders see 
+:ref:`halo_finding`.
+
+.. code-block:: python
+
+    from yt.mods import *
+    from yt.analysis_modules.halo_analysis.api import HaloCatalog
+    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+    hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+
+A halo catalog may also be created from already run rockstar outputs. 
+This method is not implemented for previously run friends-of-friends or 
+HOP finders. Even though rockstar creates one file per processor, 
+specifying any one file allows the full catalog to be loaded. Here we 
+only specify the file output by the processor with ID 0. Note that the 
+argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
+
+.. code-block:: python
+
+    halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+    hc = HaloCatalog(halos_pf=halos_pf)
+
+Although supplying only the binary output of the rockstar halo finder 
+is sufficient for creating a halo catalog, it is not possible to find 
+any new information about the identified halos. To associate the halos 
+with the dataset from which they were found, supply arguments to both 
+halos_pf and data_pf.
+
+.. code-block:: python
+
+    halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+    hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+
+A data container can also be supplied via keyword data_source, 
+associated with either dataset, to control the spatial region in 
+which halo analysis will be performed.
+
+Analysis Using Halo Catalogs
+============================
+
+Analysis is done by adding actions to the HaloCatalog. Each action is 
+represented by a callback function that will be run on each halo. 
+There are three types of actions:
+
+    - Filters
+    - Quantities
+    - Callbacks
+
+All interaction with this analysis can be performed by importing from 
+halo_analysis.
+
+Filters
+-------
+
+A filter is a function that returns True or False. If the return value 
+is True, any further queued analysis will proceed and the halo in 
+question will be added to the final catalog. If the return value is False, 
+further analysis will not be performed and the halo will not be included 
+in the final catalog.
+
+An example of adding a filter:
+
+.. code-block:: python
+
+    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
+
+Currently quantity_value is the only available filter, but more can be 
+added by the user by defining a function that accepts a halo object as 
+the first argument and then adding it as an available filter. If you 
+think that your filter may be of use to the general community, you can 
+add it to yt/analysis_modules/halo_analysis/halo_filters.py and issue a 
+pull request.
+
+An example of defining your own filter:
+
+.. code-block:: python
+
+    def my_filter_function(halo):
+        
+        # Define condition for filter
+        filter_value = True
+        
+        # Return a boolean value 
+        return filter_value
+
+    # Add your filter to the filter registry
+    add_filter("my_filter", my_filter_function)
+
+    # ... Later on in your script
+    hc.add_filter("my_filter")
+
+Quantities
+----------
+
+A quantity is a callback that returns a value or values. The return values 
+are stored within the halo object in a dictionary called “quantities.” At 
+the end of the analysis, all of these quantities will be written to disk as 
+the final form of the generated “halo catalog.”
+
+Quantities may be available in the initial fields found in the halo catalog, 
+or calculated from a function after supplying a definition. An example 
+definition of center of mass is shown below. Currently available quantities 
+are center_of_mass and bulk_velocity. Their definitions are available in 
+yt/analysis_modules/halo_analysis/halo_quantities.py. If you think that 
+your quantity may be of use to the general community, add it to 
+halo_quantities.py and issue a pull request.
+
+An example of adding a quantity:
+
+.. code-block:: python
+
+    hc.add_quantity('center_of_mass')
+
+An example of defining your own quantity:
+
+.. code-block:: python
+
+    def my_quantity_function(halo):
+        # Define quantity to return
+        quantity = 5
+        
+        return quantity
+
+    # Add your quantity to the quantity registry
+    add_quantity('my_quantity', my_quantity_function)
+
+
+    # ... Later on in your script
+    hc.add_quantity("my_quantity") 
+
+Callbacks
+---------
+
+A callback is actually the superclass for quantities and filters and 
+is a general purpose function that does something, anything, to a Halo 
+object. This can include hanging new attributes off the Halo object, 
+performing analysis and writing to disk, etc. A callback does not return 
+anything.
+
+An example of using a pre-defined callback, where we create a sphere for 
+each halo with a radius that is twice the saved “radius”:
+
+.. code-block:: python
+
+    hc.add_callback("sphere", factor=2.0)
+    
+Currently available callbacks are located in 
+yt/analysis_modules/halo_analysis/halo_callbacks.py. New callbacks may 
+be added by using the syntax shown below. If you think that your 
+callback may be of use to the general community, add it to 
+halo_callbacks.py and issue a pull request.
+
+An example of defining your own callback:
+
+.. code-block:: python
+
+    def my_callback_function(halo):
+        # Perform some callback actions here
+        x = 2
+        halo.x_val = x
+
+    # Add the callback to the callback registry
+    add_callback('my_callback', my_callback_function)
+
+
+    # ...  Later on in your script
+    hc.add_callback("my_callback")
+
+Running Analysis
+================
+
+After all callbacks, quantities, and filters have been added, the 
+analysis begins with a call to HaloCatalog.create.
+
+.. code-block:: python
+
+    hc.create()
+
+The save_halos keyword determines whether the actual Halo objects 
+are saved after analysis on them has completed or whether just the 
+contents of their quantities dicts will be retained for creating the 
+final catalog. The looping over halos uses a call to parallel_objects 
+allowing the user to control how many processors work on each halo. 
+The final catalog is written to disk in the output directory given 
+when the HaloCatalog object was created.
+
+All callbacks, quantities, and filters are stored in an “actions” list, 
+meaning that they are executed in the same order in which they were added. 
+This enables the use of simple, reusable, single action callbacks that 
+depend on each other. This also prevents unnecessary computation by allowing 
+the user to add filters at multiple stages to skip remaining analysis if it 
+is not warranted.
+
+Saving and Reloading Halo Catalogs
+==================================
+
+A HaloCatalog saved to disk can be reloaded as a yt dataset with the 
+standard call to load. Any side data, such as profiles, can be reloaded 
+with a load_profiles callback and a call to HaloCatalog.load.
+
+.. code-block:: python
+
+    hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+    hc = HaloCatalog(halos_pf=hpf,
+                     output_dir="halo_catalogs/catalog_0046")
+    hc.add_callback("load_profiles", output_dir="profiles",
+                    filename="virial_profiles")
+    hc.load()
+
+Summary
+=======
+
+For a full example of how to use these methods together see 
+:ref:`halo_analysis_example`.
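
Pulling the pieces of the new documentation together, a compact end-to-end
example (using only the names shown above; the dataset path assumes the same
sample data):

    from yt.mods import *
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_pf=data_pf, finder_method='hop')

    # actions run in the order they are added: this filter runs first,
    # so the quantity below is only computed for halos that pass it
    hc.add_filter('quantity_value', 'particle_mass', '>', 1e13, 'Msun')
    hc.add_quantity('center_of_mass')
    hc.create()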

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:52f186664831f5290b31ec433114927b9771e224bd79d0c82dd3d9a8d9c09bf6"
+  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -307,7 +307,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`:"
+      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`.  You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity."
      ]
     },
     {
@@ -402,11 +402,13 @@
       "\n",
       "print a/b\n",
       "print (a/b).in_cgs()\n",
+      "print (a/b).in_mks()\n",
       "print (a/b).in_units('km/s')\n",
       "print ''\n",
       "\n",
       "print a*b\n",
-      "print (a*b).in_cgs()"
+      "print (a*b).in_cgs()\n",
+      "print (a*b).in_mks()"
      ],
      "language": "python",
      "metadata": {},
@@ -433,7 +435,10 @@
       "from yt.utilities.physical_constants import G, kboltz\n",
       "\n",
       "print \"Newton's constant: \", G\n",
-      "print \"Boltzmann constant: \", kboltz"
+      "print \"Newton's constant in MKS: \", G.in_mks(), \"\\n\"\n",
+      "\n",
+      "print \"Boltzmann constant: \", kboltz\n",
+      "print \"Boltzmann constant in MKS: \", kboltz.in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8e1a5db9e3869bcf761ff39c5a95d21458b7c4205f00da3d3f973d398422a466"
+  "signature": "sha256:9e7ac626b3609cf5f3fb2d4ebc6e027ed923ab1c22f0acc212e42fc7535e3205"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -73,6 +73,7 @@
       "mass = dd['cell_mass']\n",
       "\n",
       "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
+      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
       "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
       "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
      ],
@@ -87,6 +88,7 @@
       "dx = dd['dx']\n",
       "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
       "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
+      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
       "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
      ],
      "language": "python",
@@ -109,8 +111,10 @@
       "\n",
       "* `in_units`\n",
       "* `in_cgs`\n",
+      "* `in_mks`\n",
       "* `convert_to_units`\n",
-      "* `convert_to_cgs`"
+      "* `convert_to_cgs`\n",
+      "* `convert_to_mks`"
      ]
     },
     {
@@ -134,15 +138,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The second, `in_cgs`, returns a copy of the array converted into the base units of yt's CGS unit system:"
+      "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print (dd['pressure']/dd['density'])\n",
-      "print (dd['pressure']/dd['density']).in_cgs()"
+      "print (dd['pressure'])\n",
+      "print (dd['pressure']).in_cgs()\n",
+      "print (dd['pressure']).in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:a07224c25b1d938bc1014b6d9d09c1a2392912f21b821b07615e65302677ef9b"
+  "signature": "sha256:242d7005d45a82744713bfe6389e49d47f39b524d1e7fcbf5ceb2e65dc473e68"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,77 +20,6 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "The unit registry"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "When a dataset is loaded, we attempt to detect and assign conversion factors from the internal simulation coordinate system and the physical CGS system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.mods import *\n",
-      "\n",
-      "ds = load('Enzo_64/DD0043/data0043')\n",
-      "\n",
-      "ds.unit_registry"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.unit_registry.lut"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "\n",
-      "It is not necessary to specify a unit registry when creating a new `YTArray` or `YTQuantity` since `yt` ships with a default unit registry:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units.unit_object import default_unit_registry as reg\n",
-      "\n",
-      "unit_names = reg.lut.keys()\n",
-      "unit_names.sort()\n",
-      "\n",
-      "# Print out the first 10 unit names\n",
-      "for i in range(10):\n",
-      "    print unit_names[i], reg.lut[unit_names[i]]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Each entry in the lookup table is the string name of a base unit and a tuple containing the CGS conversion factor and dimensions of the unit symbol."
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
       "Code units"
      ]
     },
@@ -98,25 +27,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Some of the most interesting unit symbols are the ones for \"code\" units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "code_unit_names = [un for un in unit_names if 'code_' in un]\n",
-      "\n",
-      "print code_unit_names"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:"
      ]
     },
@@ -132,13 +42,22 @@
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object.  Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
       "reg = ds.unit_registry\n",
       "\n",
-      "for un in code_unit_names:\n",
-      "    print un, reg.lut[un]"
+      "for un in reg.keys():\n",
+      "    if un.startswith('code_'):\n",
+      "        fmt_tup = (un, reg.lut[un][0], reg.lut[un][1])\n",
+      "        print \"Unit name:      {:<15}\\nCGS conversion: {:<15}\\nDimensions:     {:<15}\\n\".format(*fmt_tup)"
      ],
      "language": "python",
      "metadata": {},
@@ -295,6 +214,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "The unit registry"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When you create a `YTArray` without referring to a unit registry, `yt` uses the default unit registry, which does not include code units or comoving units."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = YTQuantity(3, 'cm')\n",
+      "\n",
+      "print a.units.registry.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When a dataset is loaded, `yt` infers conversion factors from the internal simulation unit system to the CGS unit system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols.  For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation."
+     ]
+    },
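+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "For example, a quantity created with an explicit reference to the dataset's unit registry can be converted into code units.  This is a minimal sketch, assuming `YTQuantity` accepts a `registry` keyword argument:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# minimal sketch: pass the dataset's unit registry explicitly\n",
+      "# (assumes YTQuantity accepts a `registry` keyword argument)\n",
+      "a = YTQuantity(3, 'cm', registry=ds.unit_registry)\n",
+      "\n",
+      "print a.in_units('code_length')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },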
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n",
+      "\n",
+      "* `ds.arr()`\n",
+      "* `ds.quan()`\n",
+      "\n",
+      "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units.  For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = ds.quan(3, 'code_length')\n",
+      "\n",
+      "print a\n",
+      "print a.in_cgs()\n",
+      "print a.in_units('Mpccm/h')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "b = ds.arr([3, 4, 5], 'Mpccm/h')\n",
+      "print b\n",
+      "print b.in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -59,7 +59,7 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'yt'
+project = u'The yt Project'
 copyright = u'2013, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -119,11 +119,16 @@
 # documentation.
 html_theme_options = dict(
     bootstrap_version = "3",
-    bootswatch_theme = "readable"
+    bootswatch_theme = "readable",
+    navbar_links = [
+        ("How to get help", "help/index"),
+        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Cookbook", "cookbook/index"),
+        ],
+    navbar_sidebarrel = False,
+    globaltoc_depth = 2,
 )
 
-#html_style = "agogo_yt.css"
-
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
 

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -43,7 +43,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # This looks better.  Now let's try turning on opacity.
@@ -56,7 +56,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v3.png", clip_ratio=6.0)
 
 # This looks pretty good, now let's go back to the full resolution AMRKDTree

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -19,8 +19,8 @@
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**na.floor(na.log10(data_source[field]).min()  )
-c_max = 10**na.floor(na.log10(data_source[field]).max()+1)
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # keep only clumps with at least 20 cells
 function = 'self.data[\'%s\'].size > 20' % field

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/cookbook/multi_plot_slice_and_proj.py
--- a/doc/source/cookbook/multi_plot_slice_and_proj.py
+++ b/doc/source/cookbook/multi_plot_slice_and_proj.py
@@ -1,4 +1,5 @@
 from yt.mods import * # set up our namespace
+from yt.visualization.base_plot_types import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
 
@@ -18,7 +19,7 @@
 
 slc = pf.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
                  center=pf.domain_center)
-proj = pf.proj(2, "density", weight_field="density", center=pf.domain_center)
+proj = pf.proj("density", 2, weight_field="density", center=pf.domain_center)
 
 slc_frb = slc.to_frb((1.0, "mpc"), 512)
 proj_frb = proj.to_frb((1.0, "mpc"), 512)

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -31,4 +31,4 @@
 # relating to what our dataset is called.
 # We save the log of the values so that the colors do not span
 # many orders of magnitude.  Try it without and see what happens.
-write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+write_image(np.log10(image), "%s_offaxis_projection.png" % pf)

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -21,13 +21,13 @@
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
 cam.snapshot("v1.png", clip_ratio=6.0)
 
-# In this case, the default alphas used (na.logspace(-3,0,Nbins)) does not
+# In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.logspace(0,0,4), colormap = 'RdBu_r')
+        alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
@@ -40,14 +40,14 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v4.png", clip_ratio=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=30.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v5.png", clip_ratio=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
@@ -55,7 +55,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=100.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v6.png", clip_ratio=6.0)
 
 # That is very opaque!  Now let's go back and see what it would look like with

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -12,7 +12,7 @@
 
 # Create a transfer function to map field values to colors.
 # We bump up our minimum to cut out some of the background fluid
-tf = ColorTransferFunction((na.log10(mi)+2.0, na.log10(ma)))
+tf = ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
 
 # Add three gaussians, evenly spaced between the min and
 # max specified above with widths of 0.02 and using the

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ b/doc/source/cookbook/save_profiles.py
@@ -33,7 +33,7 @@
 # separate columns into separate NumPy arrays, it is essential to set unpack=True.
 
 r, dens, std_dens, temp, std_temp = \
-	na.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
+	np.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
 
 fig1 = plt.figure()
 

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/cookbook/simple_slice_matplotlib_example.py
--- a/doc/source/cookbook/simple_slice_matplotlib_example.py
+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py
@@ -21,7 +21,7 @@
 rect = (0.2,0.2,0.2,0.2)
 new_ax = fig.add_axes(rect)
 
-n, bins, patches = new_ax.hist(na.random.randn(1000)+20, 50,
+n, bins, patches = new_ax.hist(np.random.randn(1000)+20, 50,
     facecolor='yellow', edgecolor='yellow')
 new_ax.set_xlabel('Dinosaurs per furlong')
 

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -379,7 +379,7 @@
    something_else``.  Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do ``a =
    a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-   ``na.multiply(a, 3, a)``.
+   ``np.multiply(a, 3, a)``.
  * In general, avoid all double-underscore method names: ``__something`` is
    usually unnecessary.
  * Doc strings should describe input, output, behavior, and any state changes

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -12,7 +12,7 @@
 computation engine, Matplotlib for some visualization tasks and Mercurial for
 version control.  Because installation of all of these interlocking parts can 
 be time-consuming, yt provides an installation script which downloads and builds
-a fully-isolated Python + Numpy + Matplotlib + HDF5 + Mercurial installation.  
+a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
 other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
 supported.
@@ -86,16 +86,41 @@
 Alternative Installation Methods
 --------------------------------
 
-If you want to forego the use of the install script, you need to make sure 
-you have yt's dependencies installed on your system.  These include: a C compiler, 
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``numpy``, and 
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``)
-to install yt as:
+If you want to forego the use of the install script, you need to make sure you
+have yt's dependencies installed on your system.  These include: a C compiler,
+``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
+``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
+install yt as:
 
 .. code-block:: bash
 
   $ pip install yt
 
+The source code for yt may be found on the Bitbucket project site and can also be
+used for installation. If you prefer to install from source instead of relying on
+external tools, you will need ``mercurial`` to clone the official repo:
+
+.. code-block:: bash
+
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user
+
+This will install yt into ``$HOME/.local/lib64/python2.7/site-packages``.
+Please refer to the ``setuptools`` documentation for additional options.
+
+Provided that the required dependencies are in a predictable location, yt should
+be able to find them automatically. However, you can manually specify the prefix
+used for the installation of ``HDF5``, ``Freetype``, and ``libpng`` by creating
+``hdf5.cfg``, ``freetype.cfg``, or ``png.cfg`` files, or by setting the
+``HDF5_DIR``, ``FTYPE_DIR``, or ``PNG_DIR`` environment variables, respectively, e.g.
+
+.. code-block:: bash
+
+  $ echo '/usr/local' > hdf5.cfg
+  $ export FTYPE_DIR=/opt/freetype
+
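+To verify the installation, you can check that yt imports cleanly and reports
+its version.  This is a minimal check; the exact version string depends on
+your checkout:
+
+.. code-block:: bash
+
+  $ python -c "import yt; print yt.__version__"
+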
 If you choose this installation method, you do not need to run the activation
 script as it is unnecessary.
 

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -15,18 +15,6 @@
    ~yt.visualization.plot_window.ProjectionPlot
    ~yt.visualization.plot_window.OffAxisProjectionPlot
 
-PlotCollection
-^^^^^^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-   ~yt.visualization.plot_collection.PlotCollection
-   ~yt.visualization.plot_collection.PlotCollectionInteractive
-   ~yt.visualization.fixed_resolution.FixedResolutionBuffer
-   ~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer
-   ~yt.visualization.base_plot_types.get_multi_plot
-
 Data Sources
 ------------
 
@@ -672,7 +660,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_passthrough
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_root_only
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_simple_proxy
-   ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_splitter
 
 Math Utilities
 --------------
@@ -721,8 +708,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ConstructedRootGrid
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ExtractedHierarchy
 
 
 Testing Infrastructure

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:d75e416150ccb017cfdf89973f8d4463e780da4d9bdc9a3783001d22021d9081"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -154,7 +155,7 @@
       "Npixels = 512 \n",
       "cam = pf.h.camera(c, L, W, Npixels, tfh.tf, fields=['temperature'],\n",
       "                  north_vector=[1.,0.,0.], steady_north=True, \n",
-      "                  sub_samples=5, no_ghost=False, l_max=0)\n",
+      "                  sub_samples=5, no_ghost=False)\n",
       "\n",
       "# Here we substitute the TransferFunction we constructed earlier.\n",
       "cam.transfer_function = tfh.tf\n",

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -32,8 +32,8 @@
    data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                            (8., 'kpc'), (1., 'kpc'))
 
-   c_min = 10**na.floor(na.log10(data_source['density']).min()  )
-   c_max = 10**na.floor(na.log10(data_source['density']).max()+1)
+   c_min = 10**np.floor(np.log10(data_source['density']).min()  )
+   c_max = 10**np.floor(np.log10(data_source['density']).max()+1)
 
    function = 'self.data[\'Density\'].size > 20'
    master_clump = Clump(data_source, None, 'density', function=function)
@@ -104,42 +104,31 @@
 
 -------------
 
-.. function:: annotate_hop_circles(self, hop_output, max_number=None, annotate=False, min_size=20, max_size=10000000, font_size=8, print_halo_size=False, print_halo_mass=False, width=None):
+.. function:: annotate_halos(self, halo_catalog, col='white', alpha=1, width=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.HopCircleCallback`.)
+   (This is a proxy for :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
 
-   Accepts a :class:`yt.HopList` *hop_output* and plots up
-   to *max_number* (None for unlimited) halos as circles.
+   Accepts a :class:`yt.HaloCatalog` *halo_catalog* and plots
+   a circle at the location of each halo, with the radius of
+   the circle corresponding to the virial radius of the halo.
+   If *width* is set to None (the default), all halos are
+   plotted.  Otherwise, only halos that fall within a slab of
+   width *width* centered on the center of the plot data are
+   plotted.  The color and transparency of the circles can be
+   controlled with *col* and *alpha*, respectively.
 
 .. python-script::
+   
+   from yt.mods import *
+   # HaloCatalog is not part of the yt.mods namespace (import path assumed)
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_pf = load('Enzo_64/RD0006/RD0006')
+   halos_pf = load('rockstar_halos/halos_0.0.bin')
 
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   halos = HaloFinder(pf)
-   p = ProjectionPlot(pf, "z", "density")
-   p.annotate_hop_circles(halos)
-   p.save()
+   hc = HaloCatalog(halos_pf=halos_pf)
+   hc.create()
 
--------------
-
-.. function:: annotate_hop_particles(self, hop_output, max_number, p_size=1.0, min_size=20, alpha=0.2):
-
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.HopParticleCallback`.)
-
-   Adds particle positions for the members of each halo as
-   identified by HOP. Along *axis* up to *max_number* groups
-   in *hop_output* that are larger than *min_size* are
-   plotted with *p_size* pixels per particle;  *alpha*
-   determines the opacity of each particle.
-
-.. python-script::
-
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   halos = HaloFinder(pf)
-   p = ProjectionPlot(pf, "x", "density", center='m', width=(10, 'Mpc'))
-   p.annotate_hop_particles(halos, max_number=100, p_size=5.0)
-   p.save()
+   prj = ProjectionPlot(data_pf, 'z', 'density')
+   prj.annotate_halos(hc)
+   prj.save()
 
 -------------
 

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -254,7 +254,7 @@
    c = [0.5, 0.5, 0.5]
    N = 512
    image = off_axis_projection(pf, c, L, W, N, "density")
-   write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+   write_image(np.log10(image), "%s_offaxis_projection.png" % pf)
 
 Here, ``W`` is the width of the projection in the x, y, *and* z
 directions.

diff -r 7e5db57e299484a66e5cc795705a0e924d8f38a1 -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -137,8 +137,7 @@
 
 # Now individual component imports from the visualization API
 from yt.visualization.api import \
-    PlotCollection, PlotCollectionInteractive, \
-    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
+    FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     write_bitmap, write_image, \
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
@@ -146,7 +145,8 @@
     show_colormaps, ProfilePlot, PhasePlot
 
 from yt.visualization.volume_rendering.api import \
-    off_axis_projection
+    off_axis_projection, ColorTransferFunction, \
+    TransferFunctionHelper
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, enable_parallelism
@@ -154,6 +154,9 @@
 from yt.convenience import \
     load, simulation
 
+from yt.testing import \
+    run_nose
+
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b5df66f031fe/
Changeset:   b5df66f031fe
Branch:      yt-3.0
User:        atmyers
Date:        2014-05-04 05:24:05
Summary:     forgot this
Affected #:  1 file

diff -r e80eecb7cbb5f55e835cdb8c4e8c71d66663566a -r b5df66f031fe4000df9f781f3b89515cac61eedd yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -77,7 +77,6 @@
     def _setup_dx(self):
         # has already been read in and stored in index
         self.dds = self.pf.arr(self.index.dds_list[self.Level], "code_length")
-<<<<<<< local
 
     @property
     def Parent(self):
@@ -90,8 +89,6 @@
     def Children(self):
         return [self.index.grids[cid - self._id_offset]
                 for cid in self._children_ids]
-=======
->>>>>>> other
 
 class ChomboHierarchy(GridIndex):
 
@@ -412,70 +409,8 @@
     def __init__(self, filename, dataset_type='orion_chombo_native',
                  storage_filename = None, ini_filename = None):
 
-<<<<<<< local
         ChomboDataset.__init__(self, filename, dataset_type, 
-            storage_filename, ini_filename)
-=======
-    def _populate_grid_objects(self):
-        self._reconstruct_parent_child()
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-        self.max_level = self.grid_levels.max()
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-    def _reconstruct_parent_child(self):
-        mask = np.empty(len(self.grids), dtype='int32')
-        mylog.debug("First pass; identifying child grids")
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(self.grid_left_edge[i,:],
-                                self.grid_right_edge[i,:],
-                                self.grid_levels[i] + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            ids = np.where(mask.astype("bool")) # where is a tuple
-            grid._children_ids = ids[0] + grid._id_offset 
-        mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
-            for child in grid.Children:
-                child._parent_id.append(i + grid._id_offset)
-
-class ChomboDataset(Dataset):
-    _index_class = ChomboHierarchy
-    _field_info_class = ChomboFieldInfo
-
-    def __init__(self, filename, dataset_type='chombo_hdf5',
-                 storage_filename = None, ini_filename = None):
-        self.fluid_types += ("chombo",)
-        self._handle = h5py.File(filename,'r')
-        self.current_time = self._handle.attrs['time']
-        self.ini_filename = ini_filename
-        self.fullplotdir = os.path.abspath(filename)
-        Dataset.__init__(self,filename,dataset_type)
-        self.storage_filename = storage_filename
-        self.cosmological_simulation = False
-
-        # These are parameters that I very much wish to get rid of.
-        self.parameters["HydroMethod"] = 'chombo' # always PPM DE
-        self.parameters["DualEnergyFormalism"] = 0 
-        self.parameters["EOSType"] = -1 # default
-
-    def __del__(self):
-        self._handle.close()
-
-    def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
->>>>>>> other
+                    storage_filename, ini_filename)
 
     def _parse_parameter_file(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/7de110e64df8/
Changeset:   7de110e64df8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-04 16:56:54
Summary:     Merged in atmyers/yt-fixes/yt-3.0 (pull request #858)

Chombo frontend Refactor
Affected #:  5 files

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -16,11 +16,12 @@
 from .data_structures import \
       ChomboGrid, \
       ChomboHierarchy, \
-      ChomboDataset
+      ChomboDataset, \
+      Orion2Hierarchy, \
+      Orion2Dataset
 
 from .fields import \
       ChomboFieldInfo
-add_chombo_field = ChomboFieldInfo.add_field
 
 from .io import \
       IOHandlerChomboHDF5

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -27,11 +27,6 @@
 from stat import \
      ST_CTIME
 
-from .definitions import \
-     chombo2enzoDict, \
-     yt2chomboFieldsDict, \
-     parameterDict \
-
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
      AMRGridPatch
@@ -48,7 +43,7 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -56,8 +51,8 @@
     def __init__(self, id, index, level, start, stop):
         AMRGridPatch.__init__(self, id, filename = index.index_filename,
                               index = index)
-        self.Parent = []
-        self.Children = []
+        self._parent_id = []
+        self._children_ids = []
         self.Level = level
         self.ActiveDimensions = stop - start + 1
 
@@ -69,7 +64,7 @@
         """
         if self.start_index is not None:
             return self.start_index
-        if self.Parent == []:
+        if self.Parent is None:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
             return np.rint(start_index).astype('int64').ravel()
@@ -83,14 +78,33 @@
         # has already been read in and stored in index
         self.dds = self.pf.arr(self.index.dds_list[self.Level], "code_length")
 
+    @property
+    def Parent(self):
+        if len(self._parent_id) == 0:
+            return None
+        return [self.index.grids[pid - self._id_offset]
+                for pid in self._parent_id]
+
+    @property
+    def Children(self):
+        return [self.index.grids[cid - self._id_offset]
+                for cid in self._children_ids]
+
 class ChomboHierarchy(GridIndex):
 
     grid = ChomboGrid
+    _data_file = None
 
     def __init__(self,pf,dataset_type='chombo_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
         self.dataset_type = dataset_type
+
+        if pf.dimensionality == 1:
+            self.dataset_type = "chombo1d_hdf5"
+        if pf.dimensionality == 2:
+            self.dataset_type = "chombo2d_hdf5"        
+
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the index file is the parameter file!
@@ -99,12 +113,267 @@
         self.directory = pf.fullpath
         self._handle = pf._handle
 
-        self.float_type = self._handle['/level_0']['data:datatype=0'].dtype.name
-        self._levels = self._handle.keys()[1:]
+        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
         GridIndex.__init__(self,pf,dataset_type)
+
         self._read_particles()
 
     def _read_particles(self):
+
+        # only do anything if the dataset contains particles
+        if not any([f[1].startswith('particle_') for f in self.field_list]):
+            return
+        
+        self.num_particles = 0
+        particles_per_grid = []
+        for key, val in self._handle.items():
+            if key.startswith('level'):
+                level_particles = val['particles:offsets'][:]
+                self.num_particles += level_particles.sum()
+                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
+
+        for i, grid in enumerate(self.grids):
+            self.grids[i].NumberOfParticles = particles_per_grid[i]
+            self.grid_particle_count[i] = particles_per_grid[i]
+
+        assert(self.num_particles == self.grid_particle_count.sum())
+
+    # Chombo datasets, by themselves, have no "known" fields. However, 
+    # we will look for "fluid" fields by finding the string "component" in
+    # the output file, and "particle" fields by finding the string "particle".
+    def _detect_output_fields(self):
+
+        # look for fluid fields
+        output_fields = []
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith("component"):
+                output_fields.append(val)
+        self.field_list = [("chombo", c) for c in output_fields]
+
+        # look for particle fields
+        particle_fields = []
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith("particle"):
+                particle_fields.append(val)
+        self.field_list.extend([("io", c) for c in particle_fields])        
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._handle[lev]['Processors'].len()
+
+    def _parse_index(self):
+        f = self._handle # shortcut
+        self.max_level = f.attrs['max_level']
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.parameter_file.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_levels[i] = level_number
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+    def _populate_grid_objects(self):
+        self._reconstruct_parent_child()
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
+        for i, grid in enumerate(self.grids):
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            ids = np.where(mask.astype("bool")) # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset 
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child._parent_id.append(i + grid._id_offset)
+
+class ChomboDataset(Dataset):
+    _index_class = ChomboHierarchy
+    _field_info_class = ChomboFieldInfo
+
+    def __init__(self, filename, dataset_type='chombo_hdf5',
+                 storage_filename = None, ini_filename = None):
+        self.fluid_types += ("chombo",)
+        self._handle = h5py.File(filename, 'r')
+
+        # look up the dimensionality of the dataset
+        D = self._handle['Chombo_global/'].attrs['SpaceDim']
+        if D == 1:
+            self.dataset_type = 'chombo1d_hdf5'
+        if D == 2:
+            self.dataset_type = 'chombo2d_hdf5'
+        if D == 3:
+            self.dataset_type = 'chombo_hdf5'
+
+        # some datasets will not be time-dependent; make
+        # sure we handle that here.
+        try:
+            self.current_time = self._handle.attrs['time']
+        except KeyError:
+            self.current_time = 0.0
+
+        self.geometry = "cartesian"
+        self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
+        Dataset.__init__(self,filename, self.dataset_type)
+        self.storage_filename = storage_filename
+        self.cosmological_simulation = False
+
+        # These are parameters that I very much wish to get rid of.
+        self.parameters["HydroMethod"] = 'chombo'
+        self.parameters["DualEnergyFormalism"] = 0 
+        self.parameters["EOSType"] = -1 # default
+
+    def __del__(self):
+        self._handle.close()
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = YTQuantity(1.0, "cm")
+        self.mass_unit = YTQuantity(1.0, "g")
+        self.time_unit = YTQuantity(1.0, "s")
+        self.velocity_unit = YTQuantity(1.0, "cm/s")
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    def _parse_parameter_file(self):
+        
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_left_edge = self._calc_left_edge()
+        self.domain_right_edge = self._calc_right_edge()
+        self.domain_dimensions = self._calc_domain_dimensions()
+
+        # if a lower-dimensional dataset, set up pseudo-3D stuff here.
+        if self.dimensionality == 1:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+        
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self.periodicity = (True, True, True)
+
+    def _calc_left_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        return LE
+
+    def _calc_right_edge(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        fileh.close()
+        return RE
+
+    def _calc_domain_dimensions(self):
+        fileh = self._handle
+        D = self.dimensionality
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return R_index - L_index
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+
+        if not (pluto_ini_file_exists and orion2_ini_file_exists):
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Chombo_global" in fileh["/"]
+                # ORION2 simulations should always have this, so exclude them here:
+                valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
+                fileh.close()
+                return valid
+            except:
+                pass
+        return False
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+
+class Orion2Hierarchy(ChomboHierarchy):
+
+    def __init__(self, pf, dataset_type="orion_chombo_native"):
+        ChomboHierarchy.__init__(self, pf, dataset_type)
+
+    def _read_particles(self):
         self.particle_filename = self.index_filename[:-4] + 'sink'
         if not os.path.exists(self.particle_filename): return
         with open(self.particle_filename, 'r') as f:
@@ -132,104 +401,16 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
-    def _detect_output_fields(self):
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        self.field_list = [("chombo", c[1]) for c in self._handle['/'].attrs.items()[-ncomp:]]
-          
-    def _count_grids(self):
-        self.num_grids = 0
-        for lev in self._levels:
-            self.num_grids += self._handle[lev]['Processors'].len()
+class Orion2Dataset(ChomboDataset):
 
-    def _parse_index(self):
-        f = self._handle # shortcut
+    _index_class = Orion2Hierarchy
+    _field_info_class = Orion2FieldInfo
 
-        # this relies on the first Group in the H5 file being
-        # 'Chombo_global'
-        levels = f.keys()[1:]
-        grids = []
-        self.dds_list = []
-        i = 0
-        for lev in levels:
-            level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            boxes = f[lev]['boxes'].value
-            dx = f[lev].attrs['dx']
-            self.dds_list.append(dx * np.ones(3))
-            for level_id, box in enumerate(boxes):
-                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
-                pg = self.grid(len(grids),self,level=level_number,
-                               start = si, stop = ei)
-                grids.append(pg)
-                grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type)
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1)
-                self.grid_particle_count[i] = 0
-                self.grid_dimensions[i] = ei - si + 1
-                i += 1
-        self.grids = np.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
-#        self.grids = np.array(self.grids, dtype='object')
+    def __init__(self, filename, dataset_type='orion_chombo_native',
+                 storage_filename = None, ini_filename = None):
 
-    def _populate_grid_objects(self):
-        self._reconstruct_parent_child()
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-        self.max_level = self.grid_levels.max()
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-    def _reconstruct_parent_child(self):
-        mask = np.empty(len(self.grids), dtype='int32')
-        mylog.debug("First pass; identifying child grids")
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(self.grid_left_edge[i,:],
-                                self.grid_right_edge[i,:],
-                                self.grid_levels[i] + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            ids = np.where(mask.astype("bool")) # where is a tuple
-            grid._children_ids = ids[0] + grid._id_offset 
-        mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
-            for child in grid.Children:
-                child._parent_id.append(i + grid._id_offset)
-
-class ChomboDataset(Dataset):
-    _index_class = ChomboHierarchy
-    _field_info_class = ChomboFieldInfo
-
-    def __init__(self, filename, dataset_type='chombo_hdf5',
-                 storage_filename = None, ini_filename = None):
-        self.fluid_types += ("chombo",)
-        self._handle = h5py.File(filename,'r')
-        self.current_time = self._handle.attrs['time']
-        self.ini_filename = ini_filename
-        self.fullplotdir = os.path.abspath(filename)
-        Dataset.__init__(self,filename,dataset_type)
-        self.storage_filename = storage_filename
-        self.cosmological_simulation = False
-
-        # These are parameters that I very much wish to get rid of.
-        self.parameters["HydroMethod"] = 'chombo' # always PPM DE
-        self.parameters["DualEnergyFormalism"] = 0 
-        self.parameters["EOSType"] = -1 # default
-
-    def __del__(self):
-        self._handle.close()
-
-    def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
 
     def _parse_parameter_file(self):
         """
@@ -237,14 +418,19 @@
         exists in the plot file directory. If one does, attempt to parse it.
         Otherwise grab the dimensions from the hdf5 file.
         """
-        
-        if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
+
+        orion2_ini_file_exists = False
+        dir_name = os.path.dirname(os.path.abspath(self.fullplotdir))
+        orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+        orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+
+        if orion2_ini_file_exists: self._parse_inputs_file('orion2.ini')
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
-        self.domain_left_edge = self.__calc_left_edge()
-        self.domain_right_edge = self.__calc_right_edge()
-        self.domain_dimensions = self.__calc_domain_dimensions()
         self.dimensionality = 3
+        self.domain_left_edge = self._calc_left_edge()
+        self.domain_right_edge = self._calc_right_edge()
+        self.domain_dimensions = self._calc_domain_dimensions()
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self.periodicity = (True, True, True)
 
@@ -261,57 +447,33 @@
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
-            if chombo2enzoDict.has_key(param):
-                paramName = chombo2enzoDict[param]
-                t = map(parameterDict[paramName], vals.split())
-                if len(t) == 1:
-                    if paramName == "GAMMA":
-                        self.gamma = t[0]
-                    else:
-                        self.parameters[paramName] = t[0]
-                else:
-                    if paramName == "RefineBy":
-                        self.parameters[paramName] = t[0]
-                    else:
-                        self.parameters[paramName] = t
-
-    def __calc_left_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        return LE
-
-    def __calc_right_edge(self):
-        fileh = h5py.File(self.parameter_filename,'r')
-        dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
-        fileh.close()
-        return RE
-
-    def __calc_domain_dimensions(self):
-        fileh = self._handle
-        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
-        return R_index - L_index
+            if param == "GAMMA":
+                self.gamma = float(vals)
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        if not os.path.isfile('pluto.ini'):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+        
+        if orion2_ini_file_exists:
+            return True
+
+        if not pluto_ini_file_exists:
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
+                # ORION2 simulations should always have this attribute:
+                valid = valid and ('CeilVA_mass' in fileh.attrs.keys())
                 fileh.close()
                 return valid
             except:
                 pass
         return False
 
-    @parallel_root_only
-    def print_key_parameters(self):
-        for a in ["current_time", "domain_dimensions", "domain_left_edge",
-                  "domain_right_edge"]:
-            if not hasattr(self, a):
-                mylog.error("Missing %s in parameter file definition!", a)
-                continue
-            v = getattr(self, a)
-            mylog.info("Parameters: %-25s = %s", a, v)

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/definitions.py
--- a/yt/frontends/chombo/definitions.py
+++ b/yt/frontends/chombo/definitions.py
@@ -13,42 +13,4 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                                 }
 
-chombo2enzoDict = {"GAMMA": "Gamma",
-                  "Ref_ratio": "RefineBy"
-                                    }
-
-yt2chomboFieldsDict = {}
-chombo2ytFieldsDict = {}
-

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -28,9 +28,15 @@
 mom_units = "code_mass / (code_time * code_length**2)"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
 
+# Chombo does not have any known fields by itself.
+class ChomboFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+    known_particle_fields = ()
+
+# Orion 2 Fields
 # We duplicate everything here from Boxlib, because we want to be able to
 # subclass it and that can be somewhat tricky.
-class ChomboFieldInfo(FieldInfoContainer):
+class Orion2FieldInfo(ChomboFieldInfo):
     known_other_fields = (
         ("density", (rho_units, ["density"], None)),
         ("energy-density", (eden_units, ["energy_density"], None)),

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -27,7 +27,8 @@
     _data_string = 'data:datatype=0'
 
     def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, pf)
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
         self._handle = pf._handle
 
     _field_dict = None
@@ -35,16 +36,29 @@
     def field_dict(self):
         if self._field_dict is not None:
             return self._field_dict
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        temp =  self._handle['/'].attrs.items()[-ncomp:]
-        val, keys = zip(*temp)
-        val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
-        self._field_dict = dict(zip(keys,val))
+        field_dict = {}
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith('component_'):
+                comp_number = int(re.match('component_(\d+)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._field_dict = field_dict
         return self._field_dict
+
+    _particle_field_index = None
+    @property
+    def particle_field_index(self):
+        if self._particle_field_index is not None:
+            return self._particle_field_index
+        field_dict = {}
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith('particle_'):
+                comp_number = int(re.match('particle_component_(\d+)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._particle_field_index = field_dict
+        return self._particle_field_index        
         
     def _read_field_names(self,grid):
         ncomp = int(self._handle['/'].attrs['num_components'])
-
         fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
@@ -90,6 +104,7 @@
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s grids",
                    size, [f2 for f1, f2 in fields], ng)
+
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
@@ -107,6 +122,80 @@
                 ind += nd
         return rv
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        chunks = list(chunks)
+
+        if selector.__class__.__name__ == "GridSelector":
+
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+
+            grid = chunks[0].objs[0]
+
+            for ftype, fname in fields:
+                rv[ftype, fname] = self._read_particles(grid, fname)
+
+            return rv
+
+        rv = {f:np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    data = self._read_particles(grid, fname)
+                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
+        return rv
+
+    def _read_particles(self, grid, name):
+
+        field_index = self.particle_field_index[name]
+        lev = 'level_%s' % grid.Level
+
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
+        items_per_particle = len(self._particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+
+        # convert between the global grid id and the id on this level            
+        grid_levels = np.array([g.Level for g in self.pf.index.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.index.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        # handle the case where this grid has no particles
+        if (offsets[lo] == offsets[hi]):
+            return np.array([], dtype=np.float64)
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
+        return data[field_index::items_per_particle]
+
+class IOHandlerChombo2DHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "chombo2d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+
+class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "chombo1d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle   
+
+class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
+    _dataset_type = "orion_chombo_native"
+
     def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files

Repository URL: https://bitbucket.org/yt_analysis/yt/
