[yt-svn] commit/yt: 69 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Sat Sep 20 20:24:42 PDT 2014


69 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/d060e6abf8ff/
Changeset:   d060e6abf8ff
Branch:      yt-3.0
User:        atmyers
Date:        2014-06-05 22:20:24+00:00
Summary:     adding a frontend for ChomboPIC
Affected #:  3 files

diff -r 51fc454e1548d776f335d2f30079569e9f3a81e7 -r d060e6abf8ffa0f2f8a7ca326a3f616706c5180d yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -18,10 +18,14 @@
       ChomboHierarchy, \
       ChomboDataset, \
       Orion2Hierarchy, \
-      Orion2Dataset
+      Orion2Dataset, \
+      ChomboPICHierarchy, \
+      ChomboPICDataset
 
 from .fields import \
-      ChomboFieldInfo
+      ChomboFieldInfo, \
+      Orion2FieldInfo, \
+      ChomboPICFieldInfo
 
 from .io import \
       IOHandlerChomboHDF5

diff -r 51fc454e1548d776f335d2f30079569e9f3a81e7 -r d060e6abf8ffa0f2f8a7ca326a3f616706c5180d yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -40,7 +40,7 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo, Orion2FieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo, ChomboPICFieldInfo
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -349,6 +349,7 @@
                 valid = "Chombo_global" in fileh["/"]
                 # ORION2 simulations should always have this:
                 valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
+                valid = valid and not ('Charm_global' in fileh.keys())
                 fileh.close()
                 return valid
             except:
@@ -475,3 +476,44 @@
                 pass
         return False
 
+class ChomboPICHierarchy(ChomboHierarchy):
+
+    def __init__(self, pf, dataset_type="chombo_hdf5"):
+        ChomboHierarchy.__init__(self, pf, dataset_type)
+
+class ChomboPICDataset(ChomboDataset):
+
+    _index_class = ChomboPICHierarchy
+    _field_info_class = ChomboPICFieldInfo
+
+    def __init__(self, filename, dataset_type='chombo_hdf5',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+        
+        if orion2_ini_file_exists:
+            return True
+
+        if not pluto_ini_file_exists:
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Charm_global" in fileh["/"]
+                fileh.close()
+                return valid
+            except:
+                pass
+        return False
\ No newline at end of file

diff -r 51fc454e1548d776f335d2f30079569e9f3a81e7 -r d060e6abf8ffa0f2f8a7ca326a3f616706c5180d yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -88,3 +88,22 @@
                        units = "erg/cm**3")
         self.add_field("temperature", function=_temperature,
                        units="K")
+
+# Chombo does not have any known fields by itself.
+class ChomboPICFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+        ("gravitational_field_z", ("code_length / code_time**2", ["gravitational-field-z"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+        ("particle_velocity_z", ("code_length / code_time", [], None)),
+    )


https://bitbucket.org/yt_analysis/yt/commits/be67dff7a658/
Changeset:   be67dff7a658
Branch:      yt-3.0
User:        atmyers
Date:        2014-06-05 22:21:28+00:00
Summary:     max_level may not be in the file, but num_levels always will be
Affected #:  1 file

diff -r d060e6abf8ffa0f2f8a7ca326a3f616706c5180d -r be67dff7a6581ed0d1150c85e36706f27efc0bc9 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -162,7 +162,7 @@
 
     def _parse_index(self):
         f = self._handle # shortcut
-        self.max_level = f.attrs['max_level']
+        self.max_level = f.attrs['num_levels'] - 1
 
         grids = []
         self.dds_list = []

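In other words, assuming the standard Chombo HDF5 layout where 'num_levels'
is a root-level attribute counting levels 0 through N-1, the finest level
index can always be recovered (the filename is hypothetical):

import h5py

f = h5py.File("plt00000.hdf5", "r")    # hypothetical Chombo output
max_level = f.attrs['num_levels'] - 1  # 'max_level' itself may be absent
print max_level
f.close()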

https://bitbucket.org/yt_analysis/yt/commits/fcc8c5859ec6/
Changeset:   fcc8c5859ec6
Branch:      yt-3.0
User:        atmyers
Date:        2014-06-05 23:54:39+00:00
Summary:     field information for ChomboPIC, including low-dim support
Affected #:  3 files

diff -r be67dff7a6581ed0d1150c85e36706f27efc0bc9 -r fcc8c5859ec668a3efba4fd1a914ccf89c5eb2f4 yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -25,7 +25,9 @@
 from .fields import \
       ChomboFieldInfo, \
       Orion2FieldInfo, \
-      ChomboPICFieldInfo
+      ChomboPICFieldInfo1D, \
+      ChomboPICFieldInfo2D, \
+      ChomboPICFieldInfo3D
 
 from .io import \
       IOHandlerChomboHDF5

diff -r be67dff7a6581ed0d1150c85e36706f27efc0bc9 -r fcc8c5859ec668a3efba4fd1a914ccf89c5eb2f4 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -40,7 +40,8 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo, Orion2FieldInfo, ChomboPICFieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D 
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -484,7 +485,7 @@
 class ChomboPICDataset(ChomboDataset):
 
     _index_class = ChomboPICHierarchy
-    _field_info_class = ChomboPICFieldInfo
+    _field_info_class = ChomboPICFieldInfo3D
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
@@ -492,6 +493,12 @@
         ChomboDataset.__init__(self, filename, dataset_type, 
                     storage_filename, ini_filename)
 
+        if self.dimensionality == 1:
+            self._field_info_class = ChomboPICFieldInfo1D
+
+        if self.dimensionality == 2:
+            self._field_info_class = ChomboPICFieldInfo2D
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
 

diff -r be67dff7a6581ed0d1150c85e36706f27efc0bc9 -r fcc8c5859ec668a3efba4fd1a914ccf89c5eb2f4 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -89,11 +89,10 @@
         self.add_field("temperature", function=_temperature,
                        units="K")
 
-# Chombo does not have any known fields by itself.
-class ChomboPICFieldInfo(FieldInfoContainer):
+class ChomboPICFieldInfo3D(FieldInfoContainer):
     known_other_fields = (
         ("density", (rho_units, ["density", "Density"], None)),
-        ("potential", ("code_length**2 / code_time**2", ["potential"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
         ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
         ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
         ("gravitational_field_z", ("code_length / code_time**2", ["gravitational-field-z"], None)),
@@ -107,3 +106,75 @@
         ("particle_velocity_y", ("code_length / code_time", [], None)),
         ("particle_velocity_z", ("code_length / code_time", [], None)),
     )
+
+def _dummy_position(field, data):
+    return 0.5*np.ones_like(data['particle_position_x'])
+
+def _dummy_velocity(field, data):
+    return np.zeros_like(data['particle_velocity_x'])
+
+def _dummy_field(field, data):
+    return 0.0 * data['gravitational_field_x']
+
+class ChomboPICFieldInfo2D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, pf, field_list):
+        super(ChomboPICFieldInfo2D, self).__init__(pf, field_list)
+
+        self.add_field(('chombo', 'gravitational_field_z'), function = _dummy_field, 
+                        units = "code_length / code_time**2")
+                        
+        self.add_field(("io", "particle_position_z"), function = _dummy_position,
+                       particle_type = True,
+                       units = "code_length")
+
+        self.add_field(("io", "particle_velocity_z"), function = _dummy_velocity,
+                       particle_type = True,
+                       units = "code_length / code_time")
+
+class ChomboPICFieldInfo1D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, pf, field_list):
+        super(ChomboPICFieldInfo1D, self).__init__(pf, field_list)
+        
+        self.add_field(('chombo', 'gravitational_field_y'), function = _dummy_field, 
+                        units = "code_length / code_time**2")
+
+        self.add_field(('chombo', 'gravitational_field_z'), function = _dummy_field, 
+                units = "code_length / code_time**2")
+
+        self.add_field(("io", "particle_position_y"), function = _dummy_position,
+                       particle_type = True,
+                       units = "code_length")
+        self.add_field(("io", "particle_position_z"), function = _dummy_position,
+                       particle_type = True,
+                       units = "code_length")
+        self.add_field(("io", "particle_velocity_y"), function = _dummy_velocity,
+                       particle_type = True,
+                       units = "code_length / code_time")
+        self.add_field(("io", "particle_velocity_z"), function = _dummy_velocity,
+                       particle_type = True,
+                       units = "code_length / code_time")

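The low-dimensional containers keep downstream code that expects 3D fields
working by registering stand-ins for the missing components. Their behavior
reduces to shape-preserving numpy operations, roughly:

import numpy as np

x = np.random.random(8)       # stands in for, e.g., particle_position_x
print 0.5 * np.ones_like(x)   # _dummy_position: domain midpoint in code units
print np.zeros_like(x)        # _dummy_velocity: identically zero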

https://bitbucket.org/yt_analysis/yt/commits/5226e1192f94/
Changeset:   5226e1192f94
Branch:      yt-3.0
User:        atmyers
Date:        2014-06-06 00:15:07+00:00
Summary:     add dummy fields to 'gas' and 'all' field types as well
Affected #:  1 file

diff -r fcc8c5859ec668a3efba4fd1a914ccf89c5eb2f4 -r 5226e1192f94a3e008951656cd37388ca8324563 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -116,6 +116,9 @@
 def _dummy_field(field, data):
     return 0.0 * data['gravitational_field_x']
 
+fluid_field_types = ['chombo', 'gas']
+particle_field_types = ['io', 'all']
+
 class ChomboPICFieldInfo2D(FieldInfoContainer):
     known_other_fields = (
         ("density", (rho_units, ["density", "Density"], None)),
@@ -134,16 +137,18 @@
     def __init__(self, pf, field_list):
         super(ChomboPICFieldInfo2D, self).__init__(pf, field_list)
 
-        self.add_field(('chombo', 'gravitational_field_z'), function = _dummy_field, 
-                        units = "code_length / code_time**2")
-                        
-        self.add_field(("io", "particle_position_z"), function = _dummy_position,
-                       particle_type = True,
-                       units = "code_length")
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+        
+        for ptype in particle_field_types:                
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
 
-        self.add_field(("io", "particle_velocity_z"), function = _dummy_velocity,
-                       particle_type = True,
-                       units = "code_length / code_time")
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
 
 class ChomboPICFieldInfo1D(FieldInfoContainer):
     known_other_fields = (
@@ -160,21 +165,23 @@
     def __init__(self, pf, field_list):
         super(ChomboPICFieldInfo1D, self).__init__(pf, field_list)
         
-        self.add_field(('chombo', 'gravitational_field_y'), function = _dummy_field, 
-                        units = "code_length / code_time**2")
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_y'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
 
-        self.add_field(('chombo', 'gravitational_field_z'), function = _dummy_field, 
-                units = "code_length / code_time**2")
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                    units = "code_length / code_time**2")
 
-        self.add_field(("io", "particle_position_y"), function = _dummy_position,
-                       particle_type = True,
-                       units = "code_length")
-        self.add_field(("io", "particle_position_z"), function = _dummy_position,
-                       particle_type = True,
-                       units = "code_length")
-        self.add_field(("io", "particle_velocity_y"), function = _dummy_velocity,
-                       particle_type = True,
-                       units = "code_length / code_time")
-        self.add_field(("io", "particle_velocity_z"), function = _dummy_velocity,
-                       particle_type = True,
-                       units = "code_length / code_time")
+        for ptype in particle_field_types:
+            self.add_field((ptype, "particle_position_y"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_velocity_y"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")

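With the loops in place, the stand-ins exist under every fluid and particle
field type, so they can be queried like any other field. A usage sketch,
assuming a 2D ChomboPIC output (the filename is hypothetical):

import yt

ds = yt.load("charm2d_plt00000.hdf5")          # hypothetical 2D output
ad = ds.all_data()
print ad["chombo", "gravitational_field_z"]    # zeros, from _dummy_field
print ad["gas", "gravitational_field_z"]       # same stand-in under 'gas'
print ad["all", "particle_velocity_z"]         # zeros, from _dummy_velocity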

https://bitbucket.org/yt_analysis/yt/commits/912b342366db/
Changeset:   912b342366db
Branch:      yt-3.0
User:        atmyers
Date:        2014-06-06 20:52:05+00:00
Summary:     fixing Fortran / C array ordering issue in the Chombo frontend
Affected #:  1 file

diff -r 5226e1192f94a3e008951656cd37388ca8324563 -r 912b342366dbe11fdbffe9b7362c3dd4bf4d2fab yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -171,7 +171,7 @@
             return np.array([], dtype=np.float64)
 
         data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-        return data[field_index::items_per_particle]
+        return np.asarray(data[field_index::items_per_particle], dtype=np.float64, order='F')
 
 class IOHandlerChombo2DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo2d_hdf5"

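Some context on the indexing being fixed here: 'particles:data' is one flat
buffer holding items_per_particle entries per particle, so a single field is
a strided slice. A small numpy demonstration of the slice and of the explicit
float64 copy the fix adds:

import numpy as np

items_per_particle = 3                  # e.g. three attributes per particle
data = np.arange(12, dtype=np.float64)  # 4 particles, flattened
field_index = 1                         # second attribute of each particle
field = np.asarray(data[field_index::items_per_particle],
                   dtype=np.float64, order='F')
print field                             # [  1.   4.   7.  10.]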

https://bitbucket.org/yt_analysis/yt/commits/dab68d6ab636/
Changeset:   dab68d6ab636
Branch:      yt-3.0
User:        atmyers
Date:        2014-06-11 19:17:15+00:00
Summary:     making the Chombo frontend ghost-cell aware
Affected #:  1 file

diff -r 912b342366dbe11fdbffe9b7362c3dd4bf4d2fab -r dab68d6ab636e2979ca0b25602c4b986edd4b3de yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -30,6 +30,17 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
+        self._read_ghost_info()
+
+    def _read_ghost_info(self):
+        self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
+        dim = len(self.ghost)
+        self._ghost_slice = [slice(g,-g, None) for g in self.ghost]
+
+        # pad with zeros if the dataset is low-dimensional
+        self.ghost += (3 - dim)*(0,)
+        
+        self.ghost = np.array(self.ghost)
 
     _field_dict = None
     @property
@@ -66,14 +77,15 @@
         lstring = 'level_%i' % grid.Level
         lev = self._handle[lstring]
         dims = grid.ActiveDimensions
-        boxsize = dims.prod()
+        shape = grid.ActiveDimensions + 2*self.ghost
+        boxsize = shape.prod()
         
         grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
+        data_no_ghost = data.reshape(shape, order='F')
+        return data_no_ghost[self._ghost_slice]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -83,16 +95,8 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             grid = chunks[0].objs[0]
-            lstring = 'level_%i' % grid.Level
-            lev = self._handle[lstring]
-            grid_offset = lev[self._offset_string][grid._level_id]
-            boxsize = grid.ActiveDimensions.prod()
             for ftype, fname in fields:
-                start = grid_offset+self.field_dict[fname]*boxsize
-                stop = start + boxsize
-                data = lev[self._data_string][start:stop]
-                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
-                                        order='F')
+                rv[ftype, fname] = self._read_data(grid, fname)
             return rv
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
@@ -108,16 +112,10 @@
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
-                lstring = 'level_%i' % g.Level
-                lev = self._handle[lstring]
-                grid_offset = lev[self._offset_string][g._level_id]
-                boxsize = g.ActiveDimensions.prod()
                 nd = 0
                 for field in fields:
-                    start = grid_offset+self.field_dict[fname]*boxsize
-                    stop = start + boxsize
-                    data = lev[self._data_string][start:stop]
-                    data = data.reshape(g.ActiveDimensions, order='F')
+                    ftype, fname = field
+                    data = self._read_data(g, fname)
                     nd = g.select(selector, data, rv[field], ind) # caches
                 ind += nd
         return rv
@@ -182,6 +180,7 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
+        self._read_ghost_info()
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
@@ -192,6 +191,7 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle   
+        self._read_ghost_info()
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"

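What the new ghost handling does, assuming a nonzero ghost depth on each
axis: the on-disk box is ActiveDimensions + 2*ghost per side, so after a
Fortran-order reshape the interior is recovered by slicing g cells off both
ends of every axis. A sketch:

import numpy as np

dims = np.array([4, 4, 4])                    # ActiveDimensions
ghost = np.array([1, 1, 1])                   # outputGhost per axis
shape = dims + 2 * ghost
data = np.arange(shape.prod(), dtype=np.float64)
with_ghost = data.reshape(shape, order='F')
interior = with_ghost[tuple(slice(g, -g) for g in ghost)]
print interior.shape                          # (4, 4, 4)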

https://bitbucket.org/yt_analysis/yt/commits/b5ea6408117e/
Changeset:   b5ea6408117e
Branch:      yt-3.0
User:        atmyers
Date:        2014-06-30 22:21:21+00:00
Summary:     Merged yt_analysis/yt/yt-3.0 into yt-3.0
Affected #:  180 files

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -41,6 +41,7 @@
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
 yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/ragged_arrays.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/GridTree.c

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/README
--- a/doc/README
+++ b/doc/README
@@ -5,6 +5,6 @@
 http://sphinx.pocoo.org/
 
 Because the documentation requires a number of dependencies, we provide
-pre-build versions online, accessible here:
+pre-built versions online, accessible here:
 
-http://yt-project.org/docs/
+http://yt-project.org/docs/dev-3.0/

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -568,7 +568,7 @@
 mkdir -p ${DEST_DIR}/data
 cd ${DEST_DIR}/data
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
-get_ytdata xray_emissivity.h5
+[ ! -e xray_emissivity.h5 ] && get_ytdata xray_emissivity.h5
 
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
@@ -608,7 +608,6 @@
 echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
 echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
@@ -624,7 +623,6 @@
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
 echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
 echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
@@ -657,7 +655,6 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
-get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -816,6 +813,7 @@
         YT_DIR=`dirname $ORIG_PWD`
     elif [ ! -e yt-hg ]
     then
+        echo "Cloning yt"
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
@@ -824,9 +822,9 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-3.0-hg ] 
+    elif [ -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
     fi
     echo Setting YT_DIR=${YT_DIR}
 fi
@@ -943,14 +941,19 @@
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
-    if [ ! -e Rockstar/done ]
+    if [ ! -e rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
-        cd Rockstar
+        if [ ! -e rockstar ]
+        then
+            ( hg clone http://bitbucket.org/MatthewTurk/rockstar 2>&1 ) 1>> ${LOG_FILE}
+        fi
+        cd rockstar
+        ( hg pull 2>&1 ) 1>> ${LOG_FILE}
+        ( hg up -C tip 2>&1 ) 1>> ${LOG_FILE}
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         cp librockstar.so ${DEST_DIR}/lib
-        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
+        ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
         echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -226,4 +226,4 @@
 =======
 
 For a full example of how to use these methods together see 
-:ref:`halo_analysis_example`.
+:doc:`halo_analysis_example`.

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/bootcamp/1)_Introduction.ipynb
--- a/doc/source/bootcamp/1)_Introduction.ipynb
+++ b/doc/source/bootcamp/1)_Introduction.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:39620670ce7751b23f30d2123fd3598de1c7843331f65de13e29f4ae9f759e0f"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -32,9 +33,40 @@
       "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
       "6. Volume Rendering (IsolatedGalaxy dataset)"
      ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "download_datasets = False\n",
+      "if download_datasets:\n",
+      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
+      "    print \"Got enzo_tiny_cosmology\"\n",
+      "    !tar xf enzo_tiny_cosmology.tar\n",
+      "    \n",
+      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
+      "    print \"Got Enzo_64\"\n",
+      "    !tar xf Enzo_64.tar\n",
+      "    \n",
+      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
+      "    print \"Got IsolatedGalaxy\"\n",
+      "    !tar xf IsolatedGalaxy.tar\n",
+      "    \n",
+      "    print \"All done!\""
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ b/doc/source/bootcamp/2)_Data_Inspection.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:15cdc35ddb8b1b938967237e17534149f734f4e7a61ebd37d74b675f8059da20"
+  "signature": "sha256:9d67e9e4ca5ce92dcd0658025dbfbd28be47b47ca8d4531fdac16cc2c2fa038b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,7 +21,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *"
+      "import yt"
      ],
      "language": "python",
      "metadata": {},
@@ -38,7 +38,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
      ],
      "language": "python",
      "metadata": {},

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ b/doc/source/bootcamp/3)_Simple_Visualization.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:eb5fbf5eb55a9c8997c687f072c8c6030e74bef0048a72b4f74a06893c11b80a"
+  "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,7 +21,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *"
+      "import yt"
      ],
      "language": "python",
      "metadata": {},
@@ -38,7 +38,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "print \"Redshift =\", ds.current_redshift"
      ],
      "language": "python",
@@ -58,7 +58,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "p = ProjectionPlot(ds, \"y\", \"density\")\n",
+      "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n",
       "p.show()"
      ],
      "language": "python",
@@ -135,7 +135,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "p = ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
+      "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
       "p.show()"
      ],
      "language": "python",
@@ -189,8 +189,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
+      "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
+      "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
       "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
       "s.zoom(10.0)"
      ],
@@ -243,7 +243,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "s = SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
+      "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
       "s.annotate_contour(\"temperature\")\n",
       "s.zoom(2.5)"
      ],
@@ -272,4 +272,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:41293a66cd6fd5eae6da2d0343549144dc53d72e83286999faab3cf21d801f51"
+  "signature": "sha256:a46e1baa90d32045c2b524100f28bad41b3665249612c9a275ee0375a6f4be20"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -22,8 +22,10 @@
      "collapsed": false,
      "input": [
       "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from matplotlib import pylab"
+      "import yt\n",
+      "import numpy as np\n",
+      "from matplotlib import pylab\n",
+      "from yt.analysis_modules.halo_finding.api import HaloFinder"
      ],
      "language": "python",
      "metadata": {},
@@ -44,7 +46,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ts = DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
+      "ts = yt.DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
      ],
      "language": "python",
      "metadata": {},
@@ -86,8 +88,13 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.semilogy(times, rho_ex[:,0], '-xk')\n",
-      "pylab.semilogy(times, rho_ex[:,1], '-xr')"
+      "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n",
+      "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n",
+      "pylab.ylabel(\"Density ($g/cm^3$)\")\n",
+      "pylab.xlabel(\"Time (Gyr)\")\n",
+      "pylab.legend()\n",
+      "pylab.ylim(1e-32, 1e-21)\n",
+      "pylab.show()"
      ],
      "language": "python",
      "metadata": {},
@@ -108,13 +115,15 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
+      "from yt.units import Msun\n",
+      "\n",
       "mass = []\n",
       "zs = []\n",
       "for ds in ts:\n",
       "    halos = HaloFinder(ds)\n",
       "    dd = ds.all_data()\n",
       "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    total_in_baryons = 0.0\n",
+      "    total_in_baryons = 0.0*Msun\n",
       "    for halo in halos:\n",
       "        sp = halo.get_sphere()\n",
       "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
@@ -136,7 +145,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.loglog(zs, mass, '-xb')"
+      "pylab.semilogx(zs, mass, '-xb')\n",
+      "pylab.xlabel(\"Redshift\")\n",
+      "pylab.ylabel(\"Mass in halos / Total mass\")\n",
+      "pylab.xlim(max(zs), min(zs))\n",
+      "pylab.ylim(-0.01, .18)"
      ],
      "language": "python",
      "metadata": {},
@@ -154,7 +167,9 @@
       "\n",
       "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
       "\n",
-      "To create a ray, we specify the start and end points."
+      "To create a ray, we specify the start and end points.\n",
+      "\n",
+      "Note that we need to convert these arrays to numpy arrays due to a bug in matplotlib 1.3.1."
      ]
     },
     {
@@ -162,7 +177,7 @@
      "collapsed": false,
      "input": [
       "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
-      "pylab.semilogy(ray[\"t\"], ray[\"density\"])"
+      "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))"
      ],
      "language": "python",
      "metadata": {},
@@ -211,10 +226,12 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "v, c = ds.find_max(\"density\")\n",
       "sl = ds.slice(0, c[0])\n",
-      "print sl[\"index\", \"x\"], sl[\"index\", \"z\"], sl[\"pdx\"]\n",
+      "print sl[\"index\", \"x\"]\n",
+      "print sl[\"index\", \"z\"]\n",
+      "print sl[\"pdx\"]\n",
       "print sl[\"gas\", \"density\"].shape"
      ],
      "language": "python",
@@ -250,8 +267,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
-      "from IPython.core.display import Image\n",
+      "yt.write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
+      "from IPython.display import Image\n",
       "Image(filename = \"temp.png\")"
      ],
      "language": "python",
@@ -274,7 +291,7 @@
      "collapsed": false,
      "input": [
       "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
-      "pw = cp.to_pw(fields = [\"density\"])"
+      "pw = cp.to_pw(fields = [(\"gas\", \"density\")])"
      ],
      "language": "python",
      "metadata": {},
@@ -309,7 +326,8 @@
      "collapsed": false,
      "input": [
       "pws = sl.to_pw(fields=[\"density\"])\n",
-      "pws.show()"
+      "#pws.show()\n",
+      "print pws.plots.keys()"
      ],
      "language": "python",
      "metadata": {},
@@ -361,4 +379,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ b/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:a19d451f3b4dcfeed448caa22c2cac35c46958e0646c19c226b1e467b76d0718"
+  "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -22,7 +22,9 @@
      "collapsed": false,
      "input": [
       "%matplotlib inline\n",
-      "from yt.mods import *\n",
+      "import yt\n",
+      "import numpy as np\n",
+      "from yt import derived_field\n",
       "from matplotlib import pylab"
      ],
      "language": "python",
@@ -61,7 +63,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "dd = ds.all_data()\n",
       "print dd.quantities.keys()"
      ],
@@ -120,7 +122,9 @@
       "bv = sp.quantities.bulk_velocity()\n",
       "L = sp.quantities.angular_momentum_vector()\n",
       "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
-      "print bv, L, rho_min, rho_max"
+      "print bv\n",
+      "print L\n",
+      "print rho_min, rho_max"
      ],
      "language": "python",
      "metadata": {},
@@ -143,9 +147,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prof = Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
+      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
       "prof.add_fields([\"temperature\",\"dinosaurs\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")"
+      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n",
+      "pylab.xlabel('Density $(g/cm^3)$')\n",
+      "pylab.ylabel('Temperature $(K)$')"
      ],
      "language": "python",
      "metadata": {},
@@ -162,7 +168,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')"
+      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n",
+      "pylab.xlabel('Density $(g/cm^3)$')\n",
+      "pylab.ylabel('Dinosaurs $(K cm / s)$')"
      ],
      "language": "python",
      "metadata": {},
@@ -179,9 +187,30 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prof = Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
+      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
       "prof.add_fields([\"cell_mass\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')"
+      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n",
+      "pylab.xlabel('Density $(g/cm^3)$')\n",
+      "pylab.ylabel('Cell mass $(M_\\odot)$')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class.  Let's redo the last plot using `ProfilePlot`"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n",
+      "prof.set_unit('cell_mass', 'Msun')\n",
+      "prof.show()"
      ],
      "language": "python",
      "metadata": {},

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/bootcamp/6)_Volume_Rendering.ipynb
--- a/doc/source/bootcamp/6)_Volume_Rendering.ipynb
+++ b/doc/source/bootcamp/6)_Volume_Rendering.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2929940fc3977b495aa124dee851f7602d61e073ed65407dd95e7cf597684b35"
+  "signature": "sha256:2a24bbe82955f9d948b39cbd1b1302968ff57f62f73afb2c7a5c4953393d00ae"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,8 +21,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *\n",
-      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+      "import yt\n",
+      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
      ],
      "language": "python",
      "metadata": {},
@@ -43,7 +43,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "tf = ColorTransferFunction((-28, -24))\n",
+      "tf = yt.ColorTransferFunction((-28, -24))\n",
       "tf.add_layers(4, w=0.01)\n",
       "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
       "cam.show()"
@@ -80,7 +80,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "tf = ColorTransferFunction((-28, -25))\n",
+      "tf = yt.ColorTransferFunction((-28, -25))\n",
       "tf.add_layers(4, w=0.03)\n",
       "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
       "cam.show(clip_ratio=4.0)"

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -1,18 +1,20 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import yt
 
 # Load the dataset.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the maximum gas density.  Note
-# that this sphere is very small compared to the size of our final plot,
-# and it has a non-axially aligned L vector.
-sp = ds.sphere("m", (1.0, "kpc"))
+# Create a 15 kpc radius sphere, centered on the center of the sim volume
+sp = ds.sphere("center", (15.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities.angular_momentum_vector()
 
 print "Angular momentum vector: {0}".format(L)
 
-# Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (15, "kpc"))
+# Create an OffAxisSlicePlot of density centered on the object with the L 
+# vector as its normal and a width of 25 kpc on a side
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
 p.save()

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED 
+
 # Using AMRKDTree Homogenized Volumes to examine large datasets
 # at lower resolution.
 
@@ -10,17 +13,17 @@
 import yt
 from yt.utilities.amr_kdtree.api import AMRKDTree
 
-# Load up a data and print out the maximum refinement level
+# Load up a dataset
 ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
 
 kd = AMRKDTree(ds)
-# Print out the total volume of all the bricks
-print kd.count_volume()
-# Print out the number of cells
-print kd.count_cells()
+
+# Print out specifics of KD Tree
+print "Total volume of all bricks = %i" % kd.count_volume()
+print "Total number of cells = %i" % kd.count_cells()
 
 tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
+cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
                   tf, volume=kd)
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
 cam.snapshot("v1.png", clip_ratio=6.0)

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/amrkdtree_to_uniformgrid.py
--- /dev/null
+++ b/doc/source/cookbook/amrkdtree_to_uniformgrid.py
@@ -0,0 +1,33 @@
+import numpy as np
+import yt
+
+#This is an example of how to map an amr data set
+#to a uniform grid. In this case the highest
+#level of refinement is mapped into a 1024x1024x1024 cube
+
+#first the amr data is loaded
+ds = yt.load("~/pfs/galaxy/new_tests/feedback_8bz/DD0021/DD0021")
+
+#next we get the maximum refinement level
+lmax = ds.parameters['MaximumRefinementLevel']
+
+#calculate the center of the domain
+domain_center = (ds.domain_right_edge + ds.domain_left_edge)/2
+
+#determine the cell size at the highest refinement level
+cell_size = ds.domain_width/(ds.domain_dimensions*2**lmax)
+
+#calculate the left edge of the new grid
+left_edge = domain_center - 512*cell_size
+
+#the number of cells per side of the new grid
+ncells = 1024
+
+#ask yt for the specified covering grid
+cgrid = ds.covering_grid(lmax, left_edge, np.array([ncells,]*3))
+
+#get a map of the density into the new grid
+density_map = cgrid["density"].astype(dtype="float32")
+
+#save the file as a numpy array for convenient future processing
+np.save("/pfs/goldbaum/galaxy/new_tests/feedback_8bz/gas_density_DD0021_log_densities.npy", density_map)

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/average_value.py
--- a/doc/source/cookbook/average_value.py
+++ b/doc/source/cookbook/average_value.py
@@ -5,9 +5,10 @@
 field = "temperature"  # The field to average
 weight = "cell_mass"  # The weight for the average
 
-dd = ds.h.all_data()  # This is a region describing the entire box,
-                      # but note it doesn't read anything in yet!
+ad = ds.all_data()  # This is a region describing the entire box,
+                    # but note it doesn't read anything in yet!
+
 # We now use our 'quantities' call to get the average quantity
-average_value = dd.quantities["WeightedAverageQuantity"](field, weight)
+average_value = ad.quantities.weighted_average_quantity(field, weight)
 
-print "Average %s (weighted by %s) is %0.5e" % (field, weight, average_value)
+print "Average %s (weighted by %s) is %0.3e %s" % (field, weight, average_value, average_value.units)

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/boolean_data_objects.py
--- a/doc/source/cookbook/boolean_data_objects.py
+++ b/doc/source/cookbook/boolean_data_objects.py
@@ -1,23 +1,32 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import yt
 
 ds = yt.load("Enzo_64/DD0043/data0043")  # load data
-# Make a few data ojbects to start.
+# Make a few data objects to start. Two boxes and two spheres.
 re1 = ds.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4], [0.6, 0.6, 0.6])
 re2 = ds.region([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.6, 0.6, 0.6])
 sp1 = ds.sphere([0.5, 0.5, 0.5], 0.05)
 sp2 = ds.sphere([0.1, 0.2, 0.3], 0.1)
+
 # The "AND" operator. This will make a region identical to re2.
 bool1 = ds.boolean([re1, "AND", re2])
 xp = bool1["particle_position_x"]
+
 # The "OR" operator. This will make a region identical to re1.
 bool2 = ds.boolean([re1, "OR", re2])
+
 # The "NOT" operator. This will make a region like re1, but with the corner
 # that re2 covers cut out.
 bool3 = ds.boolean([re1, "NOT", re2])
+
 # Disjoint regions can be combined with the "OR" operator.
 bool4 = ds.boolean([sp1, "OR", sp2])
+
 # Find oddly-shaped overlapping regions.
 bool5 = ds.boolean([re2, "AND", sp1])
+
 # Nested logic with parentheses.
 # This is re1 with the oddly-shaped region cut out.
 bool6 = ds.boolean([re1, "NOT", "(", re1, "AND", sp1, ")"])

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -57,3 +57,12 @@
 serial the operation ``for pf in ts:`` would also have worked identically.
 
 .. yt_cookbook:: time_series.py
+
+Complex Derived Fields
+~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe estimates the ratio of gravitational and pressure forces in a galaxy
+cluster simulation.  This shows how to create and work with vector derived 
+fields.
+
+.. yt_cookbook:: hse_field.py

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,11 +1,13 @@
-import numpy as np
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
 import yt
+import numpy as np
 
 # Follow the simple_volume_rendering cookbook for the first part of this.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
-dd = ds.all_data()
-mi, ma = dd.quantities["Extrema"]("density")
+ad = ds.all_data()
+mi, ma = ad.quantities.extrema("density")
 
 # Set up transfer function
 tf = yt.ColorTransferFunction((np.log10(mi), np.log10(ma)))
@@ -40,4 +42,4 @@
 # Zoom in by a factor of 10 over 5 frames
 for i, snapshot in enumerate(cam.zoomin(10.0, 5, clip_ratio=8.0)):
     snapshot.write_png('camera_movement_%04i.png' % frame)
-    frame += 1
\ No newline at end of file
+    frame += 1

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -36,7 +36,7 @@
 axes.  To focus on what's happening in the x-y plane, we make an additional
 Temperature slice for the bottom-right subpanel.
 
-.. yt-cookbook:: multiplot_2x2_coordaxes_slice.py
+.. yt_cookbook:: multiplot_2x2_coordaxes_slice.py
 
 Multi-Plot Slice and Projections
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/contours_on_slice.py
--- a/doc/source/cookbook/contours_on_slice.py
+++ b/doc/source/cookbook/contours_on_slice.py
@@ -1,13 +1,12 @@
 import yt
 
 # first add density contours on a density slice
-pf = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # load data
-p = yt.SlicePlot(pf, "x", "density")
+ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  
+p = yt.SlicePlot(ds, "x", "density")
 p.annotate_contour("density")
 p.save()
 
-# then add temperature contours on the same densty slice
-pf = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # load data
-p = yt.SlicePlot(pf, "x", "density")
+# then add temperature contours on the same density slice
+p = yt.SlicePlot(ds, "x", "density")
 p.annotate_contour("temperature")
-p.save(str(pf)+'_T_contour')
+p.save(str(ds)+'_T_contour')

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/extract_fixed_resolution_data.py
--- a/doc/source/cookbook/extract_fixed_resolution_data.py
+++ b/doc/source/cookbook/extract_fixed_resolution_data.py
@@ -8,21 +8,26 @@
 level = 2
 dims = ds.domain_dimensions * ds.refine_by**level
 
-# Now, we construct an object that describes the data region and structure we
-# want
-cube = ds.covering_grid(2,  # The level we are willing to extract to; higher
-                            # levels than this will not contribute to the data!
+# We construct an object that describes the data region and structure we want
+# In this case, we want all data up to the maximum "level" of refinement 
+# across the entire simulation volume.  Higher levels than this will not 
+# contribute to our covering grid.
+cube = ds.covering_grid(level,  
                         left_edge=[0.0, 0.0, 0.0],
+                        dims=dims,
                         # And any fields to preload (this is optional!)
-                        dims=dims,
                         fields=["density"])
 
 # Now we open our output file using h5py
-# Note that we open with 'w' which will overwrite existing files!
+# Note that we open with 'w' (write), which will overwrite existing files!
 f = h5py.File("my_data.h5", "w")
 
-# We create a dataset at the root note, calling it density...
+# We create a dataset at the root, calling it "density"
 f.create_dataset("/density", data=cube["density"])
 
 # We close our file
 f.close()
+
+# If we want to then access this datacube in the h5 file, we can now...
+f = h5py.File("my_data.h5", "r")
+print f["density"].value

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/ffmpeg_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/ffmpeg_volume_rendering.py
@@ -0,0 +1,99 @@
+#This is an example of how to make videos of 
+#uniform grid data using Theia and ffmpeg
+
+#The Scene object to hold the ray caster and view camera
+from yt.visualization.volume_rendering.theia.scene import TheiaScene
+
+#GPU based raycasting algorithm to use 
+from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+
+#These will be used to define how to color the data
+from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
+from yt.visualization.color_maps import *
+
+#This will be used to launch ffmpeg
+import subprocess as sp
+
+#Of course we need numpy for math magic
+import numpy as np
+
+#Opacity scaling function
+def scale_func(v, mi, ma):
+      return  np.minimum(1.0, (v-mi)/(ma-mi) + 0.0)
+
+#load the uniform grid from a numpy array file
+bolshoi = "/home/bogert/log_densities_1024.npy"
+density_grid = np.load(bolshoi)
+
+#Set the TheiaScene to use the density_grid and 
+#setup the raycaster for a resulting 1080p image
+ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (1920,1080) ))
+
+#the min and max values in the data to color
+mi, ma = 0.0, 3.6
+
+#setup colortransferfunction
+bins = 5000
+tf = ColorTransferFunction( (mi, ma), bins)
+tf.map_to_colormap(0.5, ma, colormap="spring", scale_func = scale_func)
+
+#pass the transfer function to the ray caster
+ts.source.raycaster.set_transfer(tf)
+
+#Initial configuration for start of video
+#set initial opacity and brightness values
+#then zoom into the center of the data 30%
+ts.source.raycaster.set_opacity(0.03)
+ts.source.raycaster.set_brightness(2.3)
+ts.camera.zoom(30.0)
+
+#path to ffmpeg executable
+FFMPEG_BIN = "/usr/local/bin/ffmpeg"
+
+pipe = sp.Popen([ FFMPEG_BIN,
+        '-y', # (optional) overwrite the output file if it already exists
+	#This must be set to rawvideo because the image is an array
+        '-f', 'rawvideo', 
+	#The input codec must likewise be rawvideo, since raw frames are piped in
+        '-vcodec','rawvideo',
+	#The size of the image array and resulting video
+        '-s', '1920x1080', 
+	#This must be rgba to match array format (uint32)
+        '-pix_fmt', 'rgba',
+	#frame rate of video
+        '-r', '29.97', 
+        #Indicate that the input to ffmpeg comes from a pipe
+        '-i', '-', 
+        # Tells FFMPEG not to expect any audio
+        '-an', 
+        #Setup video encoder
+	#Use any encoder you like that is available in ffmpeg
+        '-vcodec', 'libx264', '-preset', 'ultrafast', '-qp', '0',
+        '-pix_fmt', 'yuv420p',
+        #Name of the output
+        'bolshoiplanck2.mkv' ],
+        stdin=sp.PIPE,stdout=sp.PIPE)
+		
+		
+#Now we loop and produce 500 frames
+for k in range (0,500) :
+    #update the scene resulting in a new image
+    ts.update()
+
+    #get the image array from the ray caster
+    array = ts.source.get_results()
+
+    #send the image array to ffmpeg
+    array.tofile(pipe.stdin)
+
+    #rotate the scene by 0.01 rads in x,y & z
+    ts.camera.rotateX(0.01)
+    ts.camera.rotateZ(0.01)
+    ts.camera.rotateY(0.01)
+
+    #zoom in 0.01% for a total of a 5% zoom
+    ts.camera.zoom(0.01)
+
+
+#Close the pipe to ffmpeg
+pipe.terminate()
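
Note that terminate() kills ffmpeg outright, which can leave the output container unfinalized. A gentler shutdown, sketched under the same subprocess setup as above, is to close ffmpeg's stdin and let it exit on its own:

#Flush buffered frames, signal end-of-input, then wait for ffmpeg
#to finish writing the container.
pipe.stdin.flush()
pipe.stdin.close()
pipe.wait()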

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import numpy as np
 
 import yt

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -1,22 +1,21 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import yt
 from yt.analysis_modules.cosmological_observation.light_ray.api import LightRay
-from yt.analysis_modules.api import AbsorptionSpectrum
+from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 from yt.analysis_modules.absorption_spectrum.api import generate_total_fit
 
 # Define and add a field to simulate OVI based on a constant relationship to HI
-def _OVI_NumberDensity(field, data):
-    return data['HI_NumberDensity']
+# Do *NOT* use this for science, because this is not how OVI actually behaves;
+# it is just an example.
 
+@yt.derived_field(name='OVI_number_density', units='cm**-3')
+def _OVI_number_density(field, data):
+    return data['HI_NumberDensity']*2.0
 
-def _convertOVI(data):
-    return 4.9E-4*.2
 
-yt.add_field('my_OVI_NumberDensity',
-             function=_OVI_NumberDensity,
-             convert_function=_convertOVI)
-
-
-# Define species andi associated parameters to add to continuum
+# Define species and associated parameters to add to continuum
 # Parameters used for both adding the transition to the spectrum
 # and for fitting
 # Note that for single species that produce multiple lines
@@ -37,7 +36,7 @@
                  'init_N': 1E14}
 
 OVI_parameters = {'name': 'OVI',
-                  'field': 'my_OVI_NumberDensity',
+                  'field': 'OVI_number_density',
                   'f': [.1325, .06580],
                   'Gamma': [4.148E8, 4.076E8],
                   'wavelength': [1031.9261, 1037.6167],
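
The @yt.derived_field decorator used in this recipe is shorthand for registering the function by hand; an equivalent sketch, assuming yt-3.0's top-level add_field accepts the same units keyword as the decorator:

import yt

def _OVI_number_density(field, data):
    return data['HI_NumberDensity']*2.0

yt.add_field('OVI_number_density', function=_OVI_number_density,
             units='cm**-3')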

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/fits_xray_images.rst
--- a/doc/source/cookbook/fits_xray_images.rst
+++ b/doc/source/cookbook/fits_xray_images.rst
@@ -1,6 +1,6 @@
 .. _xray_fits:
 
 FITS X-ray Images in yt
-----------------------
+-----------------------
 
-.. notebook:: fits_xray_images.ipynb
\ No newline at end of file
+.. notebook:: fits_xray_images.ipynb

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ b/doc/source/cookbook/free_free_field.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import numpy as np
 import yt
 # Need to grab the proton mass from the constants database

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/global_phase_plots.py
--- a/doc/source/cookbook/global_phase_plots.py
+++ b/doc/source/cookbook/global_phase_plots.py
@@ -4,10 +4,10 @@
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # This is an object that describes the entire box
-ad = ds.h.all_data()
+ad = ds.all_data()
 
-# We plot the average VelocityMagnitude (mass-weighted) in our object
-# as a function of Density and temperature
+# We plot the average velocity magnitude (mass-weighted) in our object
+# as a function of density and temperature
 plot = yt.PhasePlot(ad, "density", "temperature", "velocity_magnitude")
 
 # save the plot

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/halo_merger_tree.py
--- a/doc/source/cookbook/halo_merger_tree.py
+++ b/doc/source/cookbook/halo_merger_tree.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 # This script demonstrates some of the halo merger tracking infrastructure,
 # for tracking halos across multiple datadumps in a time series.
 # Ultimately, it outputs an HDF5 file with the important quantities for the

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/halo_plotting.py
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -1,16 +1,20 @@
-"""
-This is a mechanism for plotting circles representing identified particle halos
-on an image.  For more information, see :ref:`halo_finding`.
-"""
-from yt.mods import * # set up our namespace
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
-data_pf = load("Enzo_64/RD0006/RedshiftOutput0006")
+import yt
+from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
 
-halo_pf = load('rockstar_halos/halos_0.0.bin')
+# Load the dataset
+ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006")
 
-hc - HaloCatalog(halos_pf = halo_pf)
+# Load the halo list from a rockstar output for this dataset
+halos = yt.load('rockstar_halos/halos_0.0.bin')
+
+# Create the halo catalog from this halo list
+hc = HaloCatalog(halos_pf = halos)
 hc.load()
 
-p = ProjectionPlot(pf, "x", "density")
+# Create a projection with the halos overplotted on top
+p = yt.ProjectionPlot(ds, "x", "density")
 p.annotate_halos(hc)
 p.save()

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/halo_profiler.py
--- a/doc/source/cookbook/halo_profiler.py
+++ b/doc/source/cookbook/halo_profiler.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 from yt.mods import *
 
 from yt.analysis_modules.halo_profiler.api import *

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -1,11 +1,16 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import numpy as np
 import yt
 
 # Define the components of the gravitational acceleration vector field by
 # taking the gradient of the gravitational potential
 
-
-def _Grav_Accel_x(field, data):
+@yt.derived_field(name='gravitational_acceleration_x',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_x(field, data):
 
     # We need to set up stencils
 
@@ -13,19 +18,22 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dx = div_fac * data['dx'].flat[0]
+    dx = div_fac * data['dx'][0]
 
     gx = data["gravitational_potential"][sl_right, 1:-1, 1:-1]/dx
     gx -= data["gravitational_potential"][sl_left, 1:-1, 1:-1]/dx
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')
+                         dtype='float64')*gx.uq
     new_field[1:-1, 1:-1, 1:-1] = -gx
 
     return new_field
 
 
-def _Grav_Accel_y(field, data):
+@yt.derived_field(name='gravitational_acceleration_y',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_y(field, data):
 
     # We need to set up stencils
 
@@ -33,19 +41,23 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dy = div_fac * data['dy'].flat[0]
+    dy = div_fac * data['dy'].flatten()[0]
 
     gy = data["gravitational_potential"][1:-1, sl_right, 1:-1]/dy
     gy -= data["gravitational_potential"][1:-1, sl_left, 1:-1]/dy
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')
+                         dtype='float64')*gy.uq
+
     new_field[1:-1, 1:-1, 1:-1] = -gy
 
     return new_field
 
 
-def _Grav_Accel_z(field, data):
+@yt.derived_field(name='gravitational_acceleration_z',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_z(field, data):
 
     # We need to set up stencils
 
@@ -53,13 +65,13 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dz = div_fac * data['dz'].flat[0]
+    dz = div_fac * data['dz'].flatten()[0]
 
     gz = data["gravitational_potential"][1:-1, 1:-1, sl_right]/dz
     gz -= data["gravitational_potential"][1:-1, 1:-1, sl_left]/dz
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')
+                         dtype='float64')*gz.uq
     new_field[1:-1, 1:-1, 1:-1] = -gz
 
     return new_field
@@ -68,7 +80,9 @@
 # Define the components of the pressure gradient field
 
 
-def _Grad_Pressure_x(field, data):
+@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
+def grad_pressure_x(field, data):
 
     # We need to set up stencils
 
@@ -76,18 +90,20 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dx = div_fac * data['dx'].flat[0]
+    dx = div_fac * data['dx'].flatten()[0]
 
     px = data["pressure"][sl_right, 1:-1, 1:-1]/dx
     px -= data["pressure"][sl_left, 1:-1, 1:-1]/dx
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.uq
     new_field[1:-1, 1:-1, 1:-1] = px
 
     return new_field
 
 
-def _Grad_Pressure_y(field, data):
+@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
+def grad_pressure_y(field, data):
 
     # We need to set up stencils
 
@@ -95,18 +111,20 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dy = div_fac * data['dy'].flat[0]
+    dy = div_fac * data['dy'].flatten()[0]
 
     py = data["pressure"][1:-1, sl_right, 1:-1]/dy
     py -= data["pressure"][1:-1, sl_left, 1:-1]/dy
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*py.uq
     new_field[1:-1, 1:-1, 1:-1] = py
 
     return new_field
 
 
-def _Grad_Pressure_z(field, data):
+@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
+def grad_pressure_z(field, data):
 
     # We need to set up stencils
 
@@ -114,12 +132,12 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dz = div_fac * data['dz'].flat[0]
+    dz = div_fac * data['dz'].flatten()[0]
 
     pz = data["pressure"][1:-1, 1:-1, sl_right]/dz
     pz -= data["pressure"][1:-1, 1:-1, sl_left]/dz
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*pz.uq
     new_field[1:-1, 1:-1, 1:-1] = pz
 
     return new_field
@@ -127,70 +145,29 @@
 
 # Define the "degree of hydrostatic equilibrium" field
 
+@yt.derived_field(name='HSE', units=None, take_log=False,
+                  display_name='Hydrostatic Equilibrium')
+def HSE(field, data):
 
-def _HSE(field, data):
+    gx = data["density"]*data["gravitational_acceleration_x"]
+    gy = data["density"]*data["gravitational_acceleration_y"]
+    gz = data["density"]*data["gravitational_acceleration_z"]
 
-    gx = data["density"]*data["Grav_Accel_x"]
-    gy = data["density"]*data["Grav_Accel_y"]
-    gz = data["density"]*data["Grav_Accel_z"]
-
-    hx = data["Grad_Pressure_x"] - gx
-    hy = data["Grad_Pressure_y"] - gy
-    hz = data["Grad_Pressure_z"] - gz
+    hx = data["grad_pressure_x"] - gx
+    hy = data["grad_pressure_y"] - gy
+    hz = data["grad_pressure_z"] - gz
 
     h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))
 
     return h
 
-# Now add the fields to the database
 
-yt.add_field("Grav_Accel_x", function=_Grav_Accel_x, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
+# Open a dataset from a time when there's a lot of sloshing going on.
 
-yt.add_field("Grav_Accel_y", function=_Grav_Accel_y, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
+ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
 
-yt.add_field("Grav_Accel_z", function=_Grav_Accel_z, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
 
-yt.add_field("Grad_Pressure_x", function=_Grad_Pressure_x, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["pressure"])])
+# Take a slice through the center of the domain
+slc = yt.SlicePlot(ds, 2, ["density", "HSE"], width=(1, 'Mpc'))
 
-yt.add_field("Grad_Pressure_y", function=_Grad_Pressure_y, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["pressure"])])
-
-yt.add_field("Grad_Pressure_z", function=_Grad_Pressure_z, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["pressure"])])
-
-yt.add_field("HSE", function=_HSE, take_log=False)
-
-# Open two files, one at the beginning and the other at a later time when
-# there's a lot of sloshing going on.
-
-dsi = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0000")
-dsf = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
-
-# Sphere objects centered at the cluster potential minimum with a radius
-# of 200 kpc
-
-sphere_i = dsi.h.sphere(dsi.domain_center, (200, "kpc"))
-sphere_f = dsf.h.sphere(dsf.domain_center, (200, "kpc"))
-
-# Average "degree of hydrostatic equilibrium" in these spheres
-
-hse_i = sphere_i.quantities["WeightedAverageQuantity"]("HSE", "cell_mass")
-hse_f = sphere_f.quantities["WeightedAverageQuantity"]("HSE", "cell_mass")
-
-print "Degree of hydrostatic equilibrium initially: ", hse_i
-print "Degree of hydrostatic equilibrium later: ", hse_f
-
-# Just for good measure, take slices through the center of the domains
-# of the two files
-
-slc_i = yt.SlicePlot(dsi, 2, ["density", "HSE"], center=dsi.domain_center,
-                     width=(1.0, "mpc"))
-slc_f = yt.SlicePlot(dsf, 2, ["density", "HSE"], center=dsf.domain_center,
-                     width=(1.0, "mpc"))
-
-slc_i.save("initial")
-slc_f.save("final")
+slc.save("hse")

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/image_background_colors.py
--- a/doc/source/cookbook/image_background_colors.py
+++ b/doc/source/cookbook/image_background_colors.py
@@ -1,21 +1,24 @@
-from yt.mods import *
-
 # This shows how to save ImageArray objects, such as those returned from 
 # volume renderings, to pngs with varying backgrounds.
 
+import yt
+import numpy as np
+
 # Lets make a fake "rendering" that has 4 channels and looks like a linear
 # gradient from the bottom to top.
+
 im = np.zeros([64,128,4])
 for i in xrange(im.shape[0]):
     for k in xrange(im.shape[2]):
         im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-im_arr = ImageArray(im)
+im_arr = yt.ImageArray(im)
 
 # in this case you would have gotten im_arr from something like:
 # im_arr = cam.snapshot() 
 
 # To save it with the default settings, we can just use write_png, where it 
 # rescales the image and uses a black background.
+
 im_arr.write_png('standard.png')
  
 # write_png accepts a background keyword argument that defaults to 'black'.
@@ -24,12 +27,8 @@
 # white (1.,1.,1.,1.)
 # None  (0.,0.,0.,0.) <-- Transparent!
 # any rgba list/array: [r,g,b,a], bounded by 0..1
+
 im_arr.write_png('black_bg.png', background='black')
 im_arr.write_png('white_bg.png', background='white')
 im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
 im_arr.write_png('transparent_bg.png', background=None)
-
-
-
-
-
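
As the comment above notes, any rgba list bounded by 0..1 is accepted as a background; for instance, a semi-transparent grey (values chosen arbitrarily):

im_arr.write_png('grey_bg.png', background=[0.5, 0.5, 0.5, 0.5])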

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -18,9 +18,6 @@
 `here <http://yt-project.org/data/>`_, where you will find links to download 
 individual datasets.
 
-If you want to take a look at more complex recipes, or submit your own,
-check out the `yt Hub <http://hub.yt-project.org>`_.
-
 .. note:: To contribute your own recipes, please follow the instructions 
     on how to contribute documentation code: :ref:`writing_documentation`.
 

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/light_cone_projection.py
--- a/doc/source/cookbook/light_cone_projection.py
+++ b/doc/source/cookbook/light_cone_projection.py
@@ -1,9 +1,13 @@
-from yt.mods import *
-from yt.analysis_modules.api import LightCone
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
+import yt
+from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
 
 # Create a LightCone object extending from z = 0 to z = 0.1
 # with a 600 arcminute field of view and a resolution of
 # 60 arcseconds.
+
 # We have already set up the redshift dumps to be
 # used for this, so we will not use any of the time
 # data dumps.

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/light_cone_with_halo_mask.py
--- a/doc/source/cookbook/light_cone_with_halo_mask.py
+++ b/doc/source/cookbook/light_cone_with_halo_mask.py
@@ -1,7 +1,10 @@
-from yt.mods import *
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
-from yt.analysis_modules.api import LightCone
-from yt.analysis_modules.halo_profiler.api import *
+import yt
+
+from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
+from yt.analysis_modules.halo_profiler.api import HaloProfiler
 
 # Instantiate a light cone object as usual.
 lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/make_light_ray.py
--- a/doc/source/cookbook/make_light_ray.py
+++ b/doc/source/cookbook/make_light_ray.py
@@ -1,13 +1,16 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import os
 import sys
-
-from yt.mods import *
-
-from yt.analysis_modules.halo_profiler.api import *
-from yt.analysis_modules.cosmological_observation.light_ray.api import \
+import yt
+from yt.analysis_modules.halo_profiler.api import HaloProfiler
+from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
      LightRay
 
-if not os.path.isdir("LR"): os.mkdir('LR')
+# Create a directory for the light rays
+if not os.path.isdir("LR"): 
+    os.mkdir('LR')
      
 # Create a LightRay object extending from z = 0 to z = 0.1
 # and use only the redshift dumps.

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/multi_plot_3x2_FRB.py
--- a/doc/source/cookbook/multi_plot_3x2_FRB.py
+++ b/doc/source/cookbook/multi_plot_3x2_FRB.py
@@ -1,12 +1,14 @@
-from yt.mods import * # set up our namespace
+import yt
+import numpy as np
+from yt.visualization.api import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
 
 fn = "Enzo_64/RD0006/RedshiftOutput0006" # parameter file to load
 
-
-pf = load(fn) # load data
-v, c = pf.h.find_max("density")
+# load data and find the maximum density value and its location
+ds = yt.load(fn) 
+v, c = ds.find_max("density")
 
 # set up our Fixed Resolution Buffer parameters: a width, resolution, and center
 width = (1.0, 'unitary')
@@ -28,7 +30,7 @@
 # over the columns, which will become axes of slicing.
 plots = []
 for ax in range(3):
-    sli = pf.slice(ax, c[ax])
+    sli = ds.slice(ax, c[ax])
     frb = sli.to_frb(width, res)
     den_axis = axes[ax][0]
     temp_axis = axes[ax][1]
@@ -39,11 +41,16 @@
         ax.xaxis.set_visible(False)
         ax.yaxis.set_visible(False)
 
-    plots.append(den_axis.imshow(frb['density'], norm=LogNorm()))
+    # converting our fixed resolution buffers to NDarray so matplotlib can
+    # render them
+    dens = np.array(frb['density'])
+    temp = np.array(frb['temperature'])
+
+    plots.append(den_axis.imshow(dens, norm=LogNorm()))
     plots[-1].set_clim((5e-32, 1e-29))
     plots[-1].set_cmap("bds_highcontrast")
 
-    plots.append(temp_axis.imshow(frb['temperature'], norm=LogNorm()))
+    plots.append(temp_axis.imshow(temp, norm=LogNorm()))
     plots[-1].set_clim((1e3, 1e8))
     plots[-1].set_cmap("hot")
     
@@ -60,4 +67,4 @@
     cbar.set_label(t)
 
 # And now we're done!  
-fig.savefig("%s_3x2.png" % pf)
+fig.savefig("%s_3x2.png" % ds)

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/multi_plot_slice_and_proj.py
--- a/doc/source/cookbook/multi_plot_slice_and_proj.py
+++ b/doc/source/cookbook/multi_plot_slice_and_proj.py
@@ -1,4 +1,5 @@
-from yt.mods import * # set up our namespace
+import yt
+import numpy as np
 from yt.visualization.base_plot_types import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
@@ -6,7 +7,7 @@
 fn = "GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150" # parameter file to load
 orient = 'horizontal'
 
-pf = load(fn) # load data
+ds = yt.load(fn) # load data
 
 # There's a lot in here:
 #   From this we get a containing figure, a list-of-lists of axes into which we
@@ -17,12 +18,11 @@
 #   bw is the base-width in inches, but 4 is about right for most cases.
 fig, axes, colorbars = get_multi_plot(3, 2, colorbar=orient, bw = 4)
 
-slc = pf.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
-                 center=pf.domain_center)
-proj = pf.proj("density", 2, weight_field="density", center=pf.domain_center)
+slc = yt.SlicePlot(ds, 'z', fields=["density","temperature","velocity_magnitude"])
+proj = yt.ProjectionPlot(ds, 'z', "density", weight_field="density")
 
-slc_frb = slc.to_frb((1.0, "mpc"), 512)
-proj_frb = proj.to_frb((1.0, "mpc"), 512)
+slc_frb = slc.data_source.to_frb((1.0, "Mpc"), 512)
+proj_frb = proj.data_source.to_frb((1.0, "Mpc"), 512)
 
 dens_axes = [axes[0][0], axes[1][0]]
 temp_axes = [axes[0][1], axes[1][1]]
@@ -37,12 +37,22 @@
     vax.xaxis.set_visible(False)
     vax.yaxis.set_visible(False)
 
-plots = [dens_axes[0].imshow(slc_frb["density"], origin='lower', norm=LogNorm()),
-         dens_axes[1].imshow(proj_frb["density"], origin='lower', norm=LogNorm()),
-         temp_axes[0].imshow(slc_frb["temperature"], origin='lower'),    
-         temp_axes[1].imshow(proj_frb["temperature"], origin='lower'),
-         vels_axes[0].imshow(slc_frb["velocity_magnitude"], origin='lower', norm=LogNorm()),
-         vels_axes[1].imshow(proj_frb["velocity_magnitude"], origin='lower', norm=LogNorm())]
+# Converting our Fixed Resolution Buffers to numpy arrays so that matplotlib
+# can render them
+
+slc_dens = np.array(slc_frb['density'])
+proj_dens = np.array(proj_frb['density'])
+slc_temp = np.array(slc_frb['temperature'])
+proj_temp = np.array(proj_frb['temperature'])
+slc_vel = np.array(slc_frb['velocity_magnitude'])
+proj_vel = np.array(proj_frb['velocity_magnitude'])
+
+plots = [dens_axes[0].imshow(slc_dens, origin='lower', norm=LogNorm()),
+         dens_axes[1].imshow(proj_dens, origin='lower', norm=LogNorm()),
+         temp_axes[0].imshow(slc_temp, origin='lower'),    
+         temp_axes[1].imshow(proj_temp, origin='lower'),
+         vels_axes[0].imshow(slc_vel, origin='lower', norm=LogNorm()),
+         vels_axes[1].imshow(proj_vel, origin='lower', norm=LogNorm())]
          
 plots[0].set_clim((1.0e-27,1.0e-25))
 plots[0].set_cmap("bds_highcontrast")
@@ -58,12 +68,12 @@
 plots[5].set_cmap("gist_rainbow")
 
 titles=[r'$\mathrm{Density}\ (\mathrm{g\ cm^{-3}})$', 
-        r'$\mathrm{temperature}\ (\mathrm{K})$',
-        r'$\mathrm{VelocityMagnitude}\ (\mathrm{cm\ s^{-1}})$']
+        r'$\mathrm{Temperature}\ (\mathrm{K})$',
+        r'$\mathrm{Velocity Magnitude}\ (\mathrm{cm\ s^{-1}})$']
 
 for p, cax, t in zip(plots[0:6:2], colorbars, titles):
     cbar = fig.colorbar(p, cax=cax, orientation=orient)
     cbar.set_label(t)
 
 # And now we're done! 
-fig.savefig("%s_3x2" % pf)
+fig.savefig("%s_3x2" % ds)
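
The np.array() casts above are needed because FRB fields come back as unit-carrying YTArray objects; casting strips the units so matplotlib sees plain floats. A toy sketch of the same idea (the array values are illustrative only):

import numpy as np
from yt.units.yt_array import YTArray

a = YTArray([1.0, 2.0], "g/cm**3")   # unit-aware array
plain = np.array(a)                  # plain ndarray, units dropped
print plain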

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/multi_width_image.py
--- a/doc/source/cookbook/multi_width_image.py
+++ b/doc/source/cookbook/multi_width_image.py
@@ -1,15 +1,16 @@
-from yt.mods import *
+import yt
 
 # Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Create a slice plot for the dataset.  With no additional arguments,
 # the width will be the size of the domain and the center will be the
 # center of the simulation box
-slc = SlicePlot(pf,2,'density')
+slc = yt.SlicePlot(ds, 'z', 'density')
 
-# Create a list of a couple of widths and units.
-widths = [(1, 'mpc'),
+# Create a list of a couple of widths and units. 
+# (N.B. Mpc (megaparsec) != mpc (milliparsec))
+widths = [(1, 'Mpc'),
           (15, 'kpc')]
 
 # Loop through the list of widths and units.
@@ -19,12 +20,12 @@
     slc.set_width(width, unit)
 
     # Write out the image with a unique name.
-    slc.save("%s_%010d_%s" % (pf, width, unit))
+    slc.save("%s_%010d_%s" % (ds, width, unit))
 
 zoomFactors = [2,4,5]
 
 # recreate the original slice
-slc = SlicePlot(pf,2,'density')
+slc = yt.SlicePlot(ds, 'z', 'density')
 
 for zoomFactor in zoomFactors:
 
@@ -32,4 +33,4 @@
     slc.zoom(zoomFactor)
 
     # Write out the image with a unique name.
-    slc.save("%s_%i" % (pf, zoomFactor))
+    slc.save("%s_%i" % (ds, zoomFactor))
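
The Mpc-versus-mpc caution above is worth a concrete check; a sketch assuming yt-3.0's YTQuantity and its unit registry, which parse 'mpc' as milliparsec:

from yt.units.yt_array import YTQuantity

one_Mpc = YTQuantity(1, 'Mpc')
print one_Mpc.in_units('mpc')   # 1 Mpc is 1e9 milliparsecs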

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/multiplot_2x2.py
--- a/doc/source/cookbook/multiplot_2x2.py
+++ b/doc/source/cookbook/multiplot_2x2.py
@@ -1,9 +1,9 @@
-from yt.mods import *
+import yt
 import matplotlib.pyplot as plt
 from mpl_toolkits.axes_grid1 import AxesGrid
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030"
-pf = load(fn) # load data
+ds = yt.load(fn) # load data
 
 fig = plt.figure()
 
@@ -22,11 +22,16 @@
                 cbar_size="3%",
                 cbar_pad="0%")
 
-fields = ['density', 'velocity_x', 'velocity_y', 'VelocityMagnitude']
+fields = ['density', 'velocity_x', 'velocity_y', 'velocity_magnitude']
 
 # Create the plot.  Since SlicePlot accepts a list of fields, we need only
 # do this once.
-p = SlicePlot(pf, 'z', fields)
+p = yt.SlicePlot(ds, 'z', fields)
+
+# Velocity is going to be both positive and negative, so let's make these
+# slices linear
+p.set_log('velocity_x', False)
+p.set_log('velocity_y', False)
 p.zoom(2)
 
 # For each plotted field, force the SlicePlot to redraw itself onto the AxesGrid

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
--- a/doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
+++ b/doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
@@ -1,9 +1,9 @@
-from yt.mods import *
+import yt
 import matplotlib.pyplot as plt
 from mpl_toolkits.axes_grid1 import AxesGrid
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030"
-pf = load(fn) # load data
+ds = yt.load(fn) # load data
 
 fig = plt.figure()
 
@@ -27,7 +27,7 @@
 
 for i, (direction, field) in enumerate(zip(cuts, fields)):
     # Load the data and create a single plot
-    p = SlicePlot(pf, direction, field)
+    p = yt.SlicePlot(ds, direction, field)
     p.zoom(40)
 
     # This forces the SlicePlot to redraw itself on the AxesGrid axes.

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/multiplot_2x2_time_series.py
--- a/doc/source/cookbook/multiplot_2x2_time_series.py
+++ b/doc/source/cookbook/multiplot_2x2_time_series.py
@@ -1,4 +1,4 @@
-from yt.mods import *
+import yt
 import matplotlib.pyplot as plt
 from mpl_toolkits.axes_grid1 import AxesGrid
 
@@ -23,8 +23,8 @@
 
 for i, fn in enumerate(fns):
     # Load the data and create a single plot
-    pf = load(fn) # load data
-    p = ProjectionPlot(pf, 'z', 'density', width=(55, 'Mpccm'))
+    ds = yt.load(fn) # load data
+    p = yt.ProjectionPlot(ds, 'z', 'density', width=(55, 'Mpccm'))
 
     # Ensure the colorbar limits match for all plots
     p.set_zlim('density', 1e-4, 1e-2)

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -1,7 +1,8 @@
-from yt.mods import *
+import yt
+import numpy as np
 
 # Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Choose a center for the render.
 c = [0.5, 0.5, 0.5]
@@ -25,10 +26,10 @@
 # Create the off axis projection.
 # Setting no_ghost to False speeds up the process, but makes a
 # slightly lower quality image.
-image = off_axis_projection(pf, c, L, W, Npixels, "density", no_ghost=False)
+image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Write out the final image and give it a name
 # relating to what our dataset is called.
 # We save the log of the values so that the colors do not span
 # many orders of magnitude.  Try it without and see what happens.
-write_image(np.log10(image), "%s_offaxis_projection.png" % pf)
+yt.write_image(np.log10(image), "%s_offaxis_projection.png" % ds)

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/offaxis_projection_colorbar.py
--- a/doc/source/cookbook/offaxis_projection_colorbar.py
+++ b/doc/source/cookbook/offaxis_projection_colorbar.py
@@ -1,8 +1,9 @@
-from yt.mods import * # set up our namespace
+import yt
+import numpy as np
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030" # parameter file to load
 
-pf = load(fn) # load data
+ds = yt.load(fn) # load data
 
 # Now we need a center of our volume to render.  Here we'll just use
 # 0.5,0.5,0.5, because volume renderings are not periodic.
@@ -31,9 +32,9 @@
 # Also note that we set the field which we want to project as "density", but
 # really we could use any arbitrary field like "temperature", "metallicity"
 # or whatever.
-image = off_axis_projection(pf, c, L, W, Npixels, "density", no_ghost=False)
+image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Image is now an NxN array representing the intensities of the various pixels.
 # And now, we call our direct image saver.  We save the log of the result.
-write_projection(image, "offaxis_projection_colorbar.png", 
-                 colorbar_label="Column Density (cm$^{-2}$)")
+yt.write_projection(image, "offaxis_projection_colorbar.png", 
+                    colorbar_label="Column Density (cm$^{-2}$)")

diff -r dab68d6ab636e2979ca0b25602c4b986edd4b3de -r b5ea6408117e17081da7076003eb130a48d5c290 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -1,20 +1,15 @@
-## Opaque Volume Rendering
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
-# The new version of yt also features opaque rendering, using grey opacity.
-# For example, this makes blues opaque to red and green.  In this example we
-# will explore how the opacity model you choose changes the appearance of the
-# rendering.
+import yt
+import numpy as np
 
-# Here we start by loading up a dataset, in this case galaxy0030.
-
-from yt.mods import *
-
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # We start by building a transfer function, and initializing a camera.
 
-tf = ColorTransferFunction((-30, -22))
-cam = pf.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
+tf = yt.ColorTransferFunction((-30, -22))
+cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
 
 # Now let's add some isocontours, and take a snapshot.
 
@@ -66,5 +61,3 @@
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  
-
-

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/5de451abf207/
Changeset:   5de451abf207
Branch:      yt-3.0
User:        atmyers
Date:        2014-06-30 22:47:31+00:00
Summary:     fixing an issue in the Chombo frontend related to the change to the HDF5FileHandler class
Affected #:  2 files

diff -r b5ea6408117e17081da7076003eb130a48d5c290 -r 5de451abf207d5379ae6ae14c93ce2c55f94ba0b yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -110,7 +110,7 @@
                 data[field][ub] /= weight_data[field][ub]
                 std_data[field][ub] /= weight_data[field][ub]
             self[field] = data[field]
-            #self["%s_std" % field] = np.sqrt(std_data[field])
+            self["%s_std" % field] = np.sqrt(std_data[field])
         self["UsedBins"] = used
 
         if fractional:
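
With the standard-deviation line re-enabled, each binned field gains a companion "<field>_std" entry on the profile object. A usage sketch; the BinnedProfile1D signature and the sample dataset are assumptions on my part, not part of this commit:

import yt
from yt.data_objects.profiles import BinnedProfile1D

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
ad = ds.all_data()
prof = BinnedProfile1D(ad, 64, "density", 1e-32, 1e-24)
prof.add_fields(["temperature"], weight="cell_mass")
print prof["temperature_std"]   # per-bin standard deviation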

diff -r b5ea6408117e17081da7076003eb130a48d5c290 -r 5de451abf207d5379ae6ae14c93ce2c55f94ba0b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -127,7 +127,7 @@
         
         self.num_particles = 0
         particles_per_grid = []
-        for key, val in self._handle.items():
+        for key, val in self._handle['/'].items():
             if key.startswith('level'):
                 level_particles = val['particles:offsets'][:]
                 self.num_particles += level_particles.sum()


https://bitbucket.org/yt_analysis/yt/commits/f0afdef13a62/
Changeset:   f0afdef13a62
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-13 15:18:12+00:00
Summary:     Be much more careful about assigning clump IDs.
Affected #:  1 file

diff -r 9ee033f16a58215d66b84bc408c8624f8556c790 -r f0afdef13a6282129e1f768a5777f727d35ebce4 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -108,10 +108,21 @@
         self.children = []
         if max_val is None: max_val = self.max_val
         nj, cids = identify_contours(self.data, self.field, min_val, max_val)
-        for cid in range(nj):
+        # Here, cids is the set of slices and values, keyed by the
+        # parent_grid_id, that defines the contours.  So we can figure out all
+        # the unique values of the contours by examining the list here.
+        unique_contours = set([])
+        for sl_list in cids.values():
+            for sl, ff in sl_list:
+                unique_contours.update(np.unique(ff))
+        for cid in sorted(unique_contours):
             new_clump = self.data.cut_region(
-                    ["obj['Contours'] == %s" % (cid + 1)],
+                    ["obj['Contours'] == %s" % cid],
                     {'contour_slices': cids})
+            if new_clump["Ones"].size == 0:
+                # This is to skip possibly duplicate clumps.  Using "Ones" here
+                # will speed things up.
+                continue
             self.children.append(Clump(new_clump, self, self.field,
                                        self.cached_fields,function=self.function,
                                        clump_info=self.clump_info))
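
A toy illustration of the dedup logic above, with fabricated contour data: contour ids need not be contiguous, so the code collects the values that actually occur rather than iterating over range(nj):

import numpy as np

# fabricated stand-in for cids: {parent_grid_id: [(slice, values), ...]}
cids = {0: [(None, np.array([1, 3]))], 1: [(None, np.array([3, 7]))]}
unique_contours = set([])
for sl_list in cids.values():
    for sl, ff in sl_list:
        unique_contours.update(np.unique(ff))
print sorted(unique_contours)   # [1, 3, 7] -- 3 is counted only once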


https://bitbucket.org/yt_analysis/yt/commits/7eed10ded492/
Changeset:   7eed10ded492
Branch:      yt-3.0
User:        atmyers
Date:        2014-07-01 06:52:49+00:00
Summary:     bringing in changes from other branch
Affected #:  2 files

diff -r 5de451abf207d5379ae6ae14c93ce2c55f94ba0b -r 7eed10ded4921739e1e2748133ea864f36ffbe28 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -127,7 +127,7 @@
         
         self.num_particles = 0
         particles_per_grid = []
-        for key, val in self._handle['/'].items():
+        for key, val in self._handle.items():
             if key.startswith('level'):
                 level_particles = val['particles:offsets'][:]
                 self.num_particles += level_particles.sum()
@@ -146,14 +146,14 @@
 
         # look for fluid fields
         output_fields = []
-        for key, val in self._handle['/'].attrs.items():
+        for key, val in self._handle.attrs.items():
             if key.startswith("component"):
                 output_fields.append(val)
         self.field_list = [("chombo", c) for c in output_fields]
 
         # look for particle fields
         particle_fields = []
-        for key, val in self._handle['/'].attrs.items():
+        for key, val in self._handle.attrs.items():
             if key.startswith("particle"):
                 particle_fields.append(val)
         self.field_list.extend([("io", c) for c in particle_fields])        

diff -r 5de451abf207d5379ae6ae14c93ce2c55f94ba0b -r 7eed10ded4921739e1e2748133ea864f36ffbe28 yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -40,6 +40,10 @@
     def keys(self):
         return self.handle.keys
 
+    @property
+    def items(self):
+        return self.handle.items
+
 class FITSFileHandler(HDF5FileHandler):
     def __init__(self, filename):
         from yt.utilities.on_demand_imports import _astropy
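
The new items property mirrors the existing keys property: it returns the underlying bound method, so handler.items() forwards directly to handle.items(). A self-contained sketch of the pattern, with a plain dict standing in for the h5py handle:

class Handler(object):
    def __init__(self, handle):
        self.handle = handle

    @property
    def items(self):
        # returning the bound method lets callers write handler.items()
        return self.handle.items

h = Handler({'a': 1})
print list(h.items())   # [('a', 1)]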


https://bitbucket.org/yt_analysis/yt/commits/b1b42de2234f/
Changeset:   b1b42de2234f
Branch:      yt-3.0
User:        atmyers
Date:        2014-07-01 06:53:23+00:00
Summary:     fixing a problem I introduced for datasets with no ghost cells
Affected #:  1 file

diff -r 7eed10ded4921739e1e2748133ea864f36ffbe28 -r b1b42de2234fd76ce0b4a448c40c4735bf54019d yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -30,16 +30,13 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
+        self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
         self._read_ghost_info()
 
     def _read_ghost_info(self):
         self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
-        dim = len(self.ghost)
-        self._ghost_slice = [slice(g,-g, None) for g in self.ghost]
-
         # pad with zeros if the dataset is low-dimensional
-        self.ghost += (3 - dim)*(0,)
-        
+        self.ghost += (3 - self.dim)*(0,)
         self.ghost = np.array(self.ghost)
 
     _field_dict = None
@@ -48,7 +45,7 @@
         if self._field_dict is not None:
             return self._field_dict
         field_dict = {}
-        for key, val in self._handle['/'].attrs.items():
+        for key, val in self._handle.attrs.items():
             if key.startswith('component_'):
                 comp_number = int(re.match('component_(\d)', key).groups()[0])
                 field_dict[val] = comp_number
@@ -61,7 +58,7 @@
         if self._particle_field_index is not None:
             return self._particle_field_index
         field_dict = {}
-        for key, val in self._handle['/'].attrs.items():
+        for key, val in self._handle.attrs.items():
             if key.startswith('particle_'):
                 comp_number = int(re.match('particle_component_(\d)', key).groups()[0])
                 field_dict[val] = comp_number
@@ -69,11 +66,10 @@
         return self._particle_field_index        
         
     def _read_field_names(self,grid):
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
+        ncomp = int(self._handle.attrs['num_components'])
+        fns = [c[1] for c in f.attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
-
         lstring = 'level_%i' % grid.Level
         lev = self._handle[lstring]
         dims = grid.ActiveDimensions
@@ -85,7 +81,9 @@
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
         data_no_ghost = data.reshape(shape, order='F')
-        return data_no_ghost[self._ghost_slice]
+        ghost_slice = [slice(g, d-g, None) for g, d in zip(self.ghost, grid.ActiveDimensions)]
+        ghost_slice = ghost_slice[0:self.dim]
+        return data_no_ghost[ghost_slice]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -180,6 +178,7 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
+        self.dim = 2
         self._read_ghost_info()
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
@@ -190,6 +189,7 @@
     def __init__(self, pf, *args, **kwargs):
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
+        self.dim = 1
         self._handle = pf._handle   
         self._read_ghost_info()
 


https://bitbucket.org/yt_analysis/yt/commits/2dc210c95e98/
Changeset:   2dc210c95e98
Branch:      yt-3.0
User:        atmyers
Date:        2014-07-01 16:56:50+00:00
Summary:     merging
Affected #:  1 file

diff -r b1b42de2234fd76ce0b4a448c40c4735bf54019d -r 2dc210c95e98dd6e6303dcd037ece281695364c9 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -108,10 +108,21 @@
         self.children = []
         if max_val is None: max_val = self.max_val
         nj, cids = identify_contours(self.data, self.field, min_val, max_val)
-        for cid in range(nj):
+        # Here, cids is the set of slices and values, keyed by the
+        # parent_grid_id, that defines the contours.  So we can figure out all
+        # the unique values of the contours by examining the list here.
+        unique_contours = set([])
+        for sl_list in cids.values():
+            for sl, ff in sl_list:
+                unique_contours.update(np.unique(ff))
+        for cid in sorted(unique_contours):
             new_clump = self.data.cut_region(
                     ["obj['contours'] == %s" % (cid + 1)],
                     {'contour_slices': cids})
+            if new_clump["Ones"].size == 0:
+                # This is to skip possibly duplicate clumps.  Using "Ones" here
+                # will speed things up.
+                continue
             self.children.append(Clump(new_clump, self, self.field,
                                        self.cached_fields,function=self.function,
                                        clump_info=self.clump_info))


https://bitbucket.org/yt_analysis/yt/commits/de49523be9d7/
Changeset:   de49523be9d7
Branch:      yt-3.0
User:        atmyers
Date:        2014-07-10 00:37:48+00:00
Summary:     adding a new dataset to the answer tests for chombo
Affected #:  1 file

diff -r 2c7f31ad4c3f7081373ad1ad5203dc6c253aba55 -r de49523be9d72ae46d2fb36650ee09529e4adc9f yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -41,3 +41,14 @@
     for test in small_patch_amr(tb, _fields):
         test_tb.__name__ = test.description
         yield test
+
+_zp_fields = ("rhs", "phi", "gravitational_field_x",
+              "gravitational_field_y")
+zp = "ZeldovichPancake/plt32.2d.hdf5"
+@requires_pf(zp)
+def test_zp():
+    pf = data_dir_load(zp)
+    yield assert_equal, str(pf), "plt32.2d.hdf5"
+    for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
+        test_zp.__name__ = test.description
+        yield test


https://bitbucket.org/yt_analysis/yt/commits/cd85e51d283d/
Changeset:   cd85e51d283d
Branch:      yt-3.0
User:        atmyers
Date:        2014-07-10 00:40:40+00:00
Summary:     merging.
Affected #:  6 files

diff -r de49523be9d72ae46d2fb36650ee09529e4adc9f -r cd85e51d283de365c75b9b90a86957a039cb5fd9 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -108,10 +108,21 @@
         self.children = []
         if max_val is None: max_val = self.max_val
         nj, cids = identify_contours(self.data, self.field, min_val, max_val)
-        for cid in range(nj):
+        # Here, cids is the set of slices and values, keyed by the
+        # parent_grid_id, that defines the contours.  So we can figure out all
+        # the unique values of the contours by examining the list here.
+        unique_contours = set([])
+        for sl_list in cids.values():
+            for sl, ff in sl_list:
+                unique_contours.update(np.unique(ff))
+        for cid in sorted(unique_contours):
             new_clump = self.data.cut_region(
                     ["obj['contours'] == %s" % (cid + 1)],
                     {'contour_slices': cids})
+            if new_clump["Ones"].size == 0:
+                # This is to skip possibly duplicate clumps.  Using "Ones" here
+                # will speed things up.
+                continue
             self.children.append(Clump(new_clump, self, self.field,
                                        self.cached_fields,function=self.function,
                                        clump_info=self.clump_info))

diff -r de49523be9d72ae46d2fb36650ee09529e4adc9f -r cd85e51d283de365c75b9b90a86957a039cb5fd9 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -110,7 +110,7 @@
                 data[field][ub] /= weight_data[field][ub]
                 std_data[field][ub] /= weight_data[field][ub]
             self[field] = data[field]
-            #self["%s_std" % field] = np.sqrt(std_data[field])
+            self["%s_std" % field] = np.sqrt(std_data[field])
         self["UsedBins"] = used
 
         if fractional:

diff -r de49523be9d72ae46d2fb36650ee09529e4adc9f -r cd85e51d283de365c75b9b90a86957a039cb5fd9 yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -18,10 +18,16 @@
       ChomboHierarchy, \
       ChomboDataset, \
       Orion2Hierarchy, \
-      Orion2Dataset
+      Orion2Dataset, \
+      ChomboPICHierarchy, \
+      ChomboPICDataset
 
 from .fields import \
-      ChomboFieldInfo
+      ChomboFieldInfo, \
+      Orion2FieldInfo, \
+      ChomboPICFieldInfo1D, \
+      ChomboPICFieldInfo2D, \
+      ChomboPICFieldInfo3D
 
 from .io import \
       IOHandlerChomboHDF5

diff -r de49523be9d72ae46d2fb36650ee09529e4adc9f -r cd85e51d283de365c75b9b90a86957a039cb5fd9 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -42,7 +42,8 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo, Orion2FieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D 
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -347,6 +348,7 @@
                 valid = "Chombo_global" in fileh["/"]
                 # ORION2 simulations should always have this:
                 valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
+                valid = valid and not ('Charm_global' in fileh.keys())
                 fileh.close()
                 return valid
             except:
@@ -473,3 +475,50 @@
                 pass
         return False
 
+class ChomboPICHierarchy(ChomboHierarchy):
+
+    def __init__(self, pf, dataset_type="chombo_hdf5"):
+        ChomboHierarchy.__init__(self, pf, dataset_type)
+
+class ChomboPICDataset(ChomboDataset):
+
+    _index_class = ChomboPICHierarchy
+    _field_info_class = ChomboPICFieldInfo3D
+
+    def __init__(self, filename, dataset_type='chombo_hdf5',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
+
+        if self.dimensionality == 1:
+            self._field_info_class = ChomboPICFieldInfo1D
+
+        if self.dimensionality == 2:
+            self._field_info_class = ChomboPICFieldInfo2D
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+        
+        if orion2_ini_file_exists:
+            return True
+
+        if not pluto_ini_file_exists:
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Charm_global" in fileh["/"]
+                fileh.close()
+                return valid
+            except:
+                pass
+        return False
\ No newline at end of file

diff -r de49523be9d72ae46d2fb36650ee09529e4adc9f -r cd85e51d283de365c75b9b90a86957a039cb5fd9 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -88,3 +88,100 @@
                        units = "erg/cm**3")
         self.add_field("temperature", function=_temperature,
                        units="K")
+
+class ChomboPICFieldInfo3D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+        ("gravitational_field_z", ("code_length / code_time**2", ["gravitational-field-z"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+        ("particle_velocity_z", ("code_length / code_time", [], None)),
+    )
+
+def _dummy_position(field, data):
+    return 0.5*np.ones_like(data['particle_position_x'])
+
+def _dummy_velocity(field, data):
+    return np.zeros_like(data['particle_velocity_x'])
+
+def _dummy_field(field, data):
+    return 0.0 * data['gravitational_field_x']
+
+fluid_field_types = ['chombo', 'gas']
+particle_field_types = ['io', 'all']
+
+class ChomboPICFieldInfo2D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, pf, field_list):
+        super(ChomboPICFieldInfo2D, self).__init__(pf, field_list)
+
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+        
+        for ptype in particle_field_types:                
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+
+class ChomboPICFieldInfo1D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, pf, field_list):
+        super(ChomboPICFieldInfo1D, self).__init__(pf, field_list)
+        
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_y'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                    units = "code_length / code_time**2")
+
+        for ptype in particle_field_types:
+            self.add_field((ptype, "particle_position_y"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_velocity_y"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")

diff -r de49523be9d72ae46d2fb36650ee09529e4adc9f -r cd85e51d283de365c75b9b90a86957a039cb5fd9 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -30,6 +30,14 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
+        self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self._read_ghost_info()
+
+    def _read_ghost_info(self):
+        self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
+        # pad with zeros if the dataset is low-dimensional
+        self.ghost += (3 - self.dim)*(0,)
+        self.ghost = np.array(self.ghost)
 
     _field_dict = None
     @property
@@ -62,18 +70,20 @@
         fns = [c[1] for c in f.attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
-
         lstring = 'level_%i' % grid.Level
         lev = self._handle[lstring]
         dims = grid.ActiveDimensions
-        boxsize = dims.prod()
+        shape = grid.ActiveDimensions + 2*self.ghost
+        boxsize = shape.prod()
         
         grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
+        data_no_ghost = data.reshape(shape, order='F')
+        ghost_slice = [slice(g, g+d, None) for g, d in zip(self.ghost, grid.ActiveDimensions)]
+        ghost_slice = ghost_slice[0:self.dim]
+        return data_no_ghost[ghost_slice]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -83,16 +93,8 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             grid = chunks[0].objs[0]
-            lstring = 'level_%i' % grid.Level
-            lev = self._handle[lstring]
-            grid_offset = lev[self._offset_string][grid._level_id]
-            boxsize = grid.ActiveDimensions.prod()
             for ftype, fname in fields:
-                start = grid_offset+self.field_dict[fname]*boxsize
-                stop = start + boxsize
-                data = lev[self._data_string][start:stop]
-                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
-                                        order='F')
+                rv[ftype, fname] = self._read_data(grid, fname)
             return rv
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
@@ -108,16 +110,10 @@
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
-                lstring = 'level_%i' % g.Level
-                lev = self._handle[lstring]
-                grid_offset = lev[self._offset_string][g._level_id]
-                boxsize = g.ActiveDimensions.prod()
                 nd = 0
                 for field in fields:
-                    start = grid_offset+self.field_dict[fname]*boxsize
-                    stop = start + boxsize
-                    data = lev[self._data_string][start:stop]
-                    data = data.reshape(g.ActiveDimensions, order='F')
+                    ftype, fname = field
+                    data = self._read_data(g, fname)
                     nd = g.select(selector, data, rv[field], ind) # caches
                 ind += nd
         return rv
@@ -182,6 +178,8 @@
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle
+        self.dim = 2
+        self._read_ghost_info()
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
@@ -191,7 +189,9 @@
     def __init__(self, pf, *args, **kwargs):
         BaseIOHandler.__init__(self, pf, *args, **kwargs)
         self.pf = pf
+        self.dim = 1
         self._handle = pf._handle   
+        self._read_ghost_info()
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"

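The _read_data rewrite above reads each component's full buffer including
ghost zones, reshapes it in Fortran order, and then slices the ghosts
away.  A sketch of the index arithmetic, assuming a 2D grid with one
ghost layer on each side:

    import numpy as np

    dims = np.array([4, 3])          # active cells per axis
    ghost = np.array([1, 1])         # ghost cells on each side
    shape = dims + 2 * ghost         # on-disk shape of one component

    buf = np.arange(shape.prod())    # stand-in for the HDF5 buffer
    padded = buf.reshape(shape, order='F')

    # strip g cells from each end of every axis: slice(g, g + d)
    interior = padded[tuple(slice(g, g + d) for g, d in zip(ghost, dims))]
    assert interior.shape == tuple(dims)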

https://bitbucket.org/yt_analysis/yt/commits/1373a799e3e0/
Changeset:   1373a799e3e0
Branch:      yt-3.0
User:        atmyers
Date:        2014-07-14 18:51:40+00:00
Summary:     default to zero ghosts if an outputGhosts attribute is not in the output file
Affected #:  1 file

diff -r 2dc210c95e98dd6e6303dcd037ece281695364c9 -r 1373a799e3e071336cac93daf814f162af2f8c95 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -34,10 +34,14 @@
         self._read_ghost_info()
 
     def _read_ghost_info(self):
-        self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
-        # pad with zeros if the dataset is low-dimensional
-        self.ghost += (3 - self.dim)*(0,)
-        self.ghost = np.array(self.ghost)
+        try:
+            self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
+            # pad with zeros if the dataset is low-dimensional
+            self.ghost += (3 - self.dim)*(0,)
+            self.ghost = np.array(self.ghost)
+        except KeyError:
+            # assume zero ghosts if outputGhost is not present
+            self.ghost = np.zeros(3, dtype='int')
 
     _field_dict = None
     @property


https://bitbucket.org/yt_analysis/yt/commits/0126235d817b/
Changeset:   0126235d817b
Branch:      yt-3.0
User:        atmyers
Date:        2014-07-14 20:35:05+00:00
Summary:     merging
Affected #:  3 files

diff -r 1373a799e3e071336cac93daf814f162af2f8c95 -r 0126235d817b9bd9d7c3141d172b9ccd865b079e yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -41,3 +41,14 @@
     for test in small_patch_amr(tb, _fields):
         test_tb.__name__ = test.description
         yield test
+
+_zp_fields = ("rhs", "phi", "gravitational_field_x",
+              "gravitational_field_y")
+zp = "ZeldovichPancake/plt32.2d.hdf5"
+@requires_pf(zp)
+def test_zp():
+    pf = data_dir_load(zp)
+    yield assert_equal, str(pf), "plt32.2d.hdf5"
+    for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
+        test_zp.__name__ = test.description
+        yield test


https://bitbucket.org/yt_analysis/yt/commits/05091190bfa9/
Changeset:   05091190bfa9
Branch:      yt-3.0
User:        atmyers
Date:        2014-08-04 19:23:23+00:00
Summary:     fixing a typo in a docstring
Affected #:  1 file

diff -r c259c6a5f8ac42834e77c4056bdb635de93620c8 -r 05091190bfa9ecfa47eb98cbcffa91ba45c04e9b yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -195,7 +195,7 @@
 
     def get_label(self, projected=False):
         """
-        Return a data label for the given field, inluding units.
+        Return a data label for the given field, including units.
         """
         name = self.name[1]
         if self.display_name is not None:


https://bitbucket.org/yt_analysis/yt/commits/e7adb8ac6438/
Changeset:   e7adb8ac6438
Branch:      yt-3.0
User:        atmyers
Date:        2014-08-05 06:37:27+00:00
Summary:     adding code to make particle scatter plots as a one-liner
Affected #:  1 file

diff -r 05091190bfa9ecfa47eb98cbcffa91ba45c04e9b -r e7adb8ac643809911eee5d39e4a81a2b5682cfd9 yt/visualization/particle_plotter.py
--- /dev/null
+++ b/yt/visualization/particle_plotter.py
@@ -0,0 +1,496 @@
+"""
+This is a simple mechanism for interfacing with Particle plots
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+import __builtin__
+import base64
+import os
+import types
+
+from functools import wraps
+from itertools import izip
+import matplotlib
+import numpy as np
+import cStringIO
+
+from .base_plot_types import ImagePlotMPL
+from .plot_container import \
+    ImagePlotContainer, \
+    log_transform, linear_transform
+from yt.data_objects.profiles import \
+    create_profile
+from yt.utilities.exceptions import \
+    YTNotInsideNotebook
+from yt.utilities.logger import ytLogger as mylog
+import _mpl_imports as mpl
+from yt.funcs import \
+    ensure_list, \
+    get_image_suffix, \
+    get_ipython_api_version
+from yt.units.unit_object import Unit
+
+def get_canvas(name):
+    suffix = get_image_suffix(name)
+    
+    if suffix == '':
+        suffix = '.png'
+    if suffix == ".png":
+        canvas_cls = mpl.FigureCanvasAgg
+    elif suffix == ".pdf":
+        canvas_cls = mpl.FigureCanvasPdf
+    elif suffix in (".eps", ".ps"):
+        canvas_cls = mpl.FigureCanvasPS
+    else:
+        mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+        canvas_cls = mpl.FigureCanvasAgg
+    return canvas_cls
+
+def invalidate_plot(f):
+    @wraps(f)
+    def newfunc(*args, **kwargs):
+        rv = f(*args, **kwargs)
+        args[0]._plot_valid = False
+        args[0]._setup_plots()
+        return rv
+    return newfunc
+
+class FigureContainer(dict):
+    def __init__(self):
+        super(FigureContainer, self).__init__()
+
+    def __missing__(self, key):
+        figure = mpl.matplotlib.figure.Figure((10, 8))
+        self[key] = figure
+        return self[key]
+
+class AxesContainer(dict):
+    def __init__(self, fig_container):
+        self.fig_container = fig_container
+        self.ylim = {}
+        super(AxesContainer, self).__init__()
+
+    def __missing__(self, key):
+        figure = self.fig_container[key]
+        self[key] = figure.add_subplot(111)
+        return self[key]
+
+    def __setitem__(self, key, value):
+        super(AxesContainer, self).__setitem__(key, value)
+        self.ylim[key] = (None, None)
+
+def sanitize_label(label, nprofiles):
+    label = ensure_list(label)
+    
+    if len(label) == 1:
+        label = label * nprofiles
+    
+    if len(label) != nprofiles:
+        raise RuntimeError("Number of labels must match number of profiles")
+
+    for l in label:
+        if l is not None and not isinstance(l, basestring):
+            raise RuntimeError("All labels must be None or a string")
+
+    return label
+
+class ParticlePlot(object):
+    r"""
+    Create a particle scatter plot from a data source.
+
+    Given a data object (all_data, region, sphere, etc.), an x field,
+    and a y field, this will create a scatter plot with one marker
+    for each particle.
+
+    Parameters
+    ----------
+    data_source : AMR3DData Object
+        The data object containing the particles to be plotted, such as
+        all_data, region, or sphere.
+    x_field : str
+        The field to plot on the x-axis.
+    y_field : str
+        The field to plot on the y-axis.
+    plot_spec : dict
+        A dictionary of plot keyword arguments passed through to
+        matplotlib.  For example, dict(c="blue", marker=".").
+        Default: None.
+
+    Examples
+    --------
+
+    This creates a scatter plot of particle x-position against
+    x-velocity for a single dataset.
+
+    >>> import yt
+    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> ad = ds.all_data()
+    >>> plot = ParticlePlot(ad, "particle_position_x", "particle_velocity_x",
+    ...                     plot_spec=dict(c='red', marker='o'))
+    >>> plot.save()
+
+    Use set_line_property to change the marker properties of the plot.
+    
+    """
+    x_log = None
+    y_log = None
+    z_log = None
+    x_title = None
+    y_title = None
+    _plot_valid = False
+
+    def __init__(self, data_source, x_field, y_field,
+                 plot_spec=None):
+
+        if plot_spec is None:
+            plot_spec = {'c':'b', 'marker':'.', 'linestyle':'None', 'markersize':8}
+
+        self.data_source = data_source
+        self.x_field = x_field
+        self.y_field = y_field
+        self.plot_spec = plot_spec
+
+        self.x_data = self.data_source[x_field]
+        self.y_data = self.data_source[y_field]
+        
+        self.figure = mpl.matplotlib.figure.Figure((10, 8))
+        self.axis = self.figure.add_subplot(111)
+        self._setup_plots()
+
+    def save(self, name=None):
+        r"""
+        Saves the scatter plot to disk.
+
+        Parameters
+        ----------
+        name : str
+            The output file keyword.
+
+        """
+        if not self._plot_valid:
+            self._setup_plots()
+        if name is None:
+            name = "%s.png" % self.data_source.ds
+        suffix = get_image_suffix(name)
+        if suffix == '':
+            suffix = ".png"
+            name += suffix
+        canvas = get_canvas(name)(self.figure)
+        mylog.info("Saving %s", name)
+        canvas.print_figure(name)
+        return name
+
+    def show(self):
+        r"""This will send any existing plots to the IPython notebook.
+        function name.
+
+        If yt is being run from within an IPython session, and it is able to
+        determine this, this function will send any existing plots to the
+        notebook for display.
+
+        If yt can't determine if it's inside an IPython session, it will raise
+        YTNotInsideNotebook.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_position_y')
+        >>> pp.show()
+
+        """
+        if "__IPYTHON__" in dir(__builtin__):
+            api_version = get_ipython_api_version()
+            if api_version in ('0.10', '0.11'):
+                self._send_zmq()
+            else:
+                from IPython.display import display
+                display(self)
+        else:
+            raise YTNotInsideNotebook
+
+    def _repr_html_(self):
+        """Return an html representation of the plot object. Will display as a
+        png for each WindowPlotMPL instance in self.plots"""
+        ret = ''
+        canvas = mpl.FigureCanvasAgg(self.figure)
+        f = cStringIO.StringIO()
+        canvas.print_figure(f)
+        f.seek(0)
+        img = base64.b64encode(f.read())
+        ret += '<img src="data:image/png;base64,%s"><br>' % img
+        return ret
+
+    def _setup_plots(self):
+        self.axis.plot(np.array(self.x_data), np.array(self.y_data),
+                       **self.plot_spec)
+
+        xscale = self._get_field_log(self.x_field)
+        yscale = self._get_field_log(self.y_field)
+
+        xtitle = self._get_field_title(self.x_field)
+        ytitle = self._get_field_title(self.y_field)
+
+        self.axis.set_xscale(xscale)
+        self.axis.set_yscale(yscale)
+
+        self.axis.set_xlabel(xtitle)
+        self.axis.set_ylabel(ytitle)
+
+        self._plot_valid = True
+
+    @invalidate_plot
+    def set_line_property(self, property, value, index=None):
+        r"""
+        Set properties for one or all lines to be plotted.
+
+        Parameters
+        ----------
+        property : str
+            The line property to be set.
+        value : str, int, float
+            The value to set for the line property.
+        index : int
+            The index of the line in the list of lines to be 
+            changed.  If None, change all plotted lines.
+            Default : None.
+
+        Examples
+        --------
+
+        Change all the lines in a plot
+        plot.set_line_property("linestyle", "-")
+
+        Change a single line.
+        plot.set_line_property("linewidth", 4, index=0)
+        
+        """
+        if index is None:
+            specs = ensure_list(self.plot_spec)
+        else:
+            specs = [ensure_list(self.plot_spec)[index]]
+        for spec in specs:
+            spec[property] = value
+        return self
+
+    @invalidate_plot
+    def set_log(self, field, log):
+        """set a field to log or linear.
+
+        Parameters
+        ----------
+        field : string
+            the field to set a transform
+        log : boolean
+            Log on/off.
+        """
+        if field == "all":
+            self.x_log = log
+            for field in self.profiles[0].field_data.keys():
+                self.y_log[field] = log
+        else:
+            field, = self.profiles[0].data_source._determine_fields([field])
+            if field == self.profiles[0].x_field:
+                self.x_log = log
+            elif field in self.profiles[0].field_data:
+                self.y_log[field] = log
+            else:
+                raise KeyError("Field %s not in profile plot!" % (field))
+        return self
+
+    @invalidate_plot
+    def set_unit(self, field, unit):
+        """Sets a new unit for the requested field
+
+        Parameters
+        ----------
+        field : string
+           The name of the field that is to be changed.
+
+        unit : string or Unit object
+           The name of the new unit.
+        """
+        if field == self.x_field:
+            self.x_data = self.x_data.in_units(unit)
+        elif field == self.y_field:
+            self.y_data = self.y_data.in_units(unit)
+        else:
+            raise KeyError("Field %s not in particle plot!" % (field))
+        return self
+
+    def set_xlim(self, xmin=None, xmax=None):
+        """Sets the x-axis limits of the scatter plot.
+
+        Parameters
+        ----------
+
+        xmin : float or None
+          The new x minimum.  Defaults to None, which leaves the xmin
+          unchanged.
+
+        xmax : float or None
+          The new x maximum.  Defaults to None, which leaves the xmax
+          unchanged.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_position_y')
+        >>> pp.set_xlim(0.25, 0.75)
+        >>> pp.save()
+
+        """
+        self.axis.set_xlim(xmin, xmax)
+        return self
+
+    def set_ylim(self, ymin=None, ymax=None):
+        """Sets the y-axis limits of the scatter plot.
+
+        Parameters
+        ----------
+
+        ymin : float or None
+          The new y minimum.  Defaults to None, which leaves the ymin
+          unchanged.
+
+        ymax : float or None
+          The new y maximum.  Defaults to None, which leaves the ymax
+          unchanged.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
+        >>> pp.set_ylim(-3e7, 3e7)
+        >>> pp.save()
+
+        """
+        self.axis.set_ylim(ymin, ymax)
+        return self
+
+    def _get_field_log(self, field):
+        ds = self.data_source.ds
+        f, = self.data_source._determine_fields([field])
+        fi = ds._get_field_info(*f)
+        do_log = fi.take_log
+        scales = {True: 'log', False: 'linear'}
+        return scales[do_log]
+
+    def _get_field_label(self, field, field_info, field_unit, fractional=False):
+        field_unit = field_unit.latex_representation()
+        field_name = field_info.display_name
+        if isinstance(field, tuple): field = field[1]
+        if field_name is None:
+            field_name = r'$\rm{'+field.replace('_','\/').title()+r'}$'
+        elif field_name.find('$') == -1:
+            field_name = field_name.replace(' ','\/')
+            field_name = r'$\rm{'+field_name+r'}$'
+        if fractional:
+            label = field_name + r'$\rm{\/Probability\/Density}$'
+        elif field_unit is None or field_unit == '' or field_unit == '1':
+            label = field_name
+        else:
+            label = field_name+r'$\/\/('+field_unit+r')$'
+        return label
+
+    def _get_field_title(self, field):
+        ds = self.data_source.ds
+        f, = self.data_source._determine_fields([field])
+        fi = ds._get_field_info(*f)
+        field_unit = Unit(fi.units, registry=self.data_source.ds.unit_registry)
+        title = self._get_field_label(field, fi, field_unit)
+        return title

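ParticlePlot is self-contained: __init__ pulls the two particle fields
out of the data source and _setup_plots scatters one against the other.
A usage sketch, assuming the class is imported from
yt.visualization.particle_plotter (dataset path illustrative):

    import yt
    from yt.visualization.particle_plotter import ParticlePlot

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    ad = ds.all_data()
    plot = ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x',
                        plot_spec=dict(c='k', marker='.', linestyle='None',
                                       markersize=4))
    plot.save('particle_scatter.png')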

https://bitbucket.org/yt_analysis/yt/commits/1434d4642694/
Changeset:   1434d4642694
Branch:      yt-3.0
User:        atmyers
Date:        2014-08-05 07:22:19+00:00
Summary:     merging
Affected #:  7 files

diff -r e7adb8ac643809911eee5d39e4a81a2b5682cfd9 -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -18,10 +18,16 @@
       ChomboHierarchy, \
       ChomboDataset, \
       Orion2Hierarchy, \
-      Orion2Dataset
+      Orion2Dataset, \
+      ChomboPICHierarchy, \
+      ChomboPICDataset
 
 from .fields import \
-      ChomboFieldInfo
+      ChomboFieldInfo, \
+      Orion2FieldInfo, \
+      ChomboPICFieldInfo1D, \
+      ChomboPICFieldInfo2D, \
+      ChomboPICFieldInfo3D
 
 from .io import \
       IOHandlerChomboHDF5

diff -r e7adb8ac643809911eee5d39e4a81a2b5682cfd9 -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -42,7 +42,8 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo, Orion2FieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D 
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -347,6 +348,7 @@
                 valid = "Chombo_global" in fileh["/"]
                 # ORION2 simulations should always have this:
                 valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
+                valid = valid and not ('Charm_global' in fileh.keys())
                 fileh.close()
                 return valid
             except:
@@ -473,3 +475,50 @@
                 pass
         return False
 
+class ChomboPICHierarchy(ChomboHierarchy):
+
+    def __init__(self, pf, dataset_type="chombo_hdf5"):
+        ChomboHierarchy.__init__(self, pf, dataset_type)
+
+class ChomboPICDataset(ChomboDataset):
+
+    _index_class = ChomboPICHierarchy
+    _field_info_class = ChomboPICFieldInfo3D
+
+    def __init__(self, filename, dataset_type='chombo_hdf5',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
+
+        if self.dimensionality == 1:
+            self._field_info_class = ChomboPICFieldInfo1D
+
+        if self.dimensionality == 2:
+            self._field_info_class = ChomboPICFieldInfo2D
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+        
+        if orion2_ini_file_exists:
+            return True
+
+        if not pluto_ini_file_exists:
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Charm_global" in fileh["/"]
+                fileh.close()
+                return valid
+            except:
+                pass
+        return False
\ No newline at end of file

diff -r e7adb8ac643809911eee5d39e4a81a2b5682cfd9 -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -88,3 +88,100 @@
                        units = "erg/cm**3")
         self.add_field("temperature", function=_temperature,
                        units="K")
+
+class ChomboPICFieldInfo3D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+        ("gravitational_field_z", ("code_length / code_time**2", ["gravitational-field-z"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+        ("particle_velocity_z", ("code_length / code_time", [], None)),
+    )
+
+def _dummy_position(field, data):
+    return 0.5*np.ones_like(data['particle_position_x'])
+
+def _dummy_velocity(field, data):
+    return np.zeros_like(data['particle_velocity_x'])
+
+def _dummy_field(field, data):
+    return 0.0 * data['gravitational_field_x']
+
+fluid_field_types = ['chombo', 'gas']
+particle_field_types = ['io', 'all']
+
+class ChomboPICFieldInfo2D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, pf, field_list):
+        super(ChomboPICFieldInfo2D, self).__init__(pf, field_list)
+
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+        
+        for ptype in particle_field_types:                
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+
+class ChomboPICFieldInfo1D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, pf, field_list):
+        super(ChomboPICFieldInfo1D, self).__init__(pf, field_list)
+        
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_y'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                    units = "code_length / code_time**2")
+
+        for ptype in particle_field_types:
+            self.add_field((ptype, "particle_position_y"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_velocity_y"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")

diff -r e7adb8ac643809911eee5d39e4a81a2b5682cfd9 -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -26,10 +26,22 @@
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, ds, *args, **kwargs):
-        BaseIOHandler.__init__(self, ds, *args, **kwargs)
-        self.ds = ds
-        self._handle = ds._handle
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self._read_ghost_info()
+
+    def _read_ghost_info(self):
+        try:
+            self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
+            # pad with zeros if the dataset is low-dimensional
+            self.ghost += (3 - self.dim)*(0,)
+            self.ghost = np.array(self.ghost)
+        except KeyError:
+            # assume zero ghosts if outputGhost is not present
+            self.ghost = np.zeros(3, dtype='int')
 
     _field_dict = None
     @property
@@ -62,18 +74,20 @@
         fns = [c[1] for c in f.attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
-
         lstring = 'level_%i' % grid.Level
         lev = self._handle[lstring]
         dims = grid.ActiveDimensions
-        boxsize = dims.prod()
+        shape = grid.ActiveDimensions + 2*self.ghost
+        boxsize = shape.prod()
         
         grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
+        data_no_ghost = data.reshape(shape, order='F')
+        ghost_slice = [slice(g, g+d, None) for g, d in zip(self.ghost, grid.ActiveDimensions)]
+        ghost_slice = ghost_slice[0:self.dim]
+        return data_no_ghost[ghost_slice]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -83,16 +97,8 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             grid = chunks[0].objs[0]
-            lstring = 'level_%i' % grid.Level
-            lev = self._handle[lstring]
-            grid_offset = lev[self._offset_string][grid._level_id]
-            boxsize = grid.ActiveDimensions.prod()
             for ftype, fname in fields:
-                start = grid_offset+self.field_dict[fname]*boxsize
-                stop = start + boxsize
-                data = lev[self._data_string][start:stop]
-                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
-                                        order='F')
+                rv[ftype, fname] = self._read_data(grid, fname)
             return rv
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
@@ -108,16 +114,10 @@
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
-                lstring = 'level_%i' % g.Level
-                lev = self._handle[lstring]
-                grid_offset = lev[self._offset_string][g._level_id]
-                boxsize = g.ActiveDimensions.prod()
                 nd = 0
                 for field in fields:
-                    start = grid_offset+self.field_dict[fname]*boxsize
-                    stop = start + boxsize
-                    data = lev[self._data_string][start:stop]
-                    data = data.reshape(g.ActiveDimensions, order='F')
+                    ftype, fname = field
+                    data = self._read_data(g, fname)
                     nd = g.select(selector, data, rv[field], ind) # caches
                 ind += nd
         return rv
@@ -178,20 +178,24 @@
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, ds, *args, **kwargs):
-        BaseIOHandler.__init__(self, ds, *args, **kwargs)
-        self.ds = ds
-        self._handle = ds._handle
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self.dim = 2
+        self._read_ghost_info()
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, ds, *args, **kwargs):
-        BaseIOHandler.__init__(self, ds, *args, **kwargs)
-        self.ds = ds
-        self._handle = ds._handle   
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self.dim = 1
+        self._handle = pf._handle   
+        self._read_ghost_info()
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"

diff -r e7adb8ac643809911eee5d39e4a81a2b5682cfd9 -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -45,10 +45,10 @@
 _zp_fields = ("rhs", "phi", "gravitational_field_x",
               "gravitational_field_y")
 zp = "ZeldovichPancake/plt32.2d.hdf5"
-@requires_ds(zp)
+@requires_pf(zp)
 def test_zp():
-    ds = data_dir_load(zp)
-    yield assert_equal, str(ds), "plt32.2d.hdf5"
+    pf = data_dir_load(zp)
+    yield assert_equal, str(pf), "plt32.2d.hdf5"
     for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
         test_zp.__name__ = test.description
         yield test


https://bitbucket.org/yt_analysis/yt/commits/21ffbfd2081d/
Changeset:   21ffbfd2081d
Branch:      yt
User:        atmyers
Date:        2014-08-05 07:25:35+00:00
Summary:     merging
Affected #:  6 files

diff -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 -r 21ffbfd2081d180d251540df7e3d986a15d117c7 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -540,22 +540,23 @@
             temp_e2[:,dim] = e2_vector[dim]
         length = np.abs(np.sum(rr * temp_e2, axis = 1) * (1 - \
             np.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
-            np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+            np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2.)**(-0.5))
         length[length == np.inf] = 0.
         tC_index = np.nanargmax(length)
         mag_C = length[tC_index]
         # tilt is calculated from the rotation about x axis
         # needed to align e1 vector with the y axis
         # after e0 is aligned with x axis
-        # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = np.arctan(e0_vector[1] / e0_vector[0])
-        RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
-        r1 = (e0_vector * RZ).sum(axis = 1)
+        # find the t1 angle needed to rotate about z axis to align e0 onto x-z plane
+        t1 = np.arctan(-e0_vector[1] / e0_vector[0])
+        RZ = get_rotation_matrix(t1, (0, 0, 1))
+        r1 = np.dot(RZ, e0_vector)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = np.arctan(-r1[2] / r1[0])
-        RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
+        t2 = np.arctan(r1[2] / r1[0])
+        RY = get_rotation_matrix(t2, (0, 1, 0))
         r2 = np.dot(RY, np.dot(RZ, e1_vector))
-        tilt = np.arctan(r2[2]/r2[1])
+        # find the tilt angle needed to rotate about x axis to align e1 to y and e2 to z
+        tilt = np.arctan(-r2[2] / r2[1])
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
@@ -771,13 +772,13 @@
         
         Returns
         -------
-        tuple : (cm, mag_A, mag_B, mag_C, e1_vector, tilt)
+        tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
             The 6-tuple has in order:
               #. The center of mass as an array.
               #. mag_A as a float.
               #. mag_B as a float.
               #. mag_C as a float.
-              #. e1_vector as an array.
+              #. e0_vector as an array.
               #. tilt as a float.
         
         Examples
@@ -808,7 +809,7 @@
     def __init__(self, pf, id, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
-        e1_vec=None, tilt=None, supp=None):
+        e0_vec=None, tilt=None, supp=None):
 
         self.pf = pf
         self.gridsize = (self.pf.domain_right_edge - \
@@ -824,7 +825,7 @@
         self.mag_A = mag_A
         self.mag_B = mag_B
         self.mag_C = mag_C
-        self.e1_vec = e1_vec
+        self.e0_vec = e0_vec
         self.tilt = tilt
         # locs=the names of the h5 files that have particle data for this halo
         self.fnames = fnames
@@ -902,8 +903,8 @@
 
     def _get_ellipsoid_parameters_basic_loadedhalo(self):
         if self.mag_A is not None:
-            return (self.mag_A, self.mag_B, self.mag_C, self.e1_vec[0],
-                self.e1_vec[1], self.e1_vec[2], self.tilt)
+            return (self.mag_A, self.mag_B, self.mag_C, self.e0_vec[0],
+                self.e0_vec[1], self.e0_vec[2], self.tilt)
         else:
             return self._get_ellipsoid_parameters_basic()
 
@@ -917,13 +918,13 @@
 
         Returns
         -------
-        tuple : (cm, mag_A, mag_B, mag_C, e1_vector, tilt)
+        tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
             The 6-tuple has in order:
               #. The center of mass as an array.
               #. mag_A as a float.
               #. mag_B as a float.
               #. mag_C as a float.
-              #. e1_vector as an array.
+              #. e0_vector as an array.
               #. tilt as a float.
 
         Examples
@@ -996,7 +997,7 @@
 
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
-        e1_vec=None, tilt=None, supp=None):
+        e0_vec=None, tilt=None, supp=None):
 
         self.pf = pf
         self.gridsize = (self.pf.domain_right_edge - \
@@ -1012,7 +1013,7 @@
         self.mag_A = mag_A
         self.mag_B = mag_B
         self.mag_C = mag_C
-        self.e1_vec = e1_vec
+        self.e0_vec = e0_vec
         self.tilt = tilt
         self.bin_count = None
         self.overdensity = None
@@ -1256,8 +1257,8 @@
                                "x","y","z", "center-of-mass",
                                "x","y","z",
                                "vx","vy","vz","max_r","rms_v",
-                               "mag_A", "mag_B", "mag_C", "e1_vec0",
-                               "e1_vec1", "e1_vec2", "tilt", "\n"]))
+                               "mag_A", "mag_B", "mag_C", "e0_vec0",
+                               "e0_vec1", "e0_vec2", "tilt", "\n"]))
 
         for group in self:
             f.write("%10i\t" % group.id)
@@ -1569,17 +1570,17 @@
                 mag_A = float(line[15])
                 mag_B = float(line[16])
                 mag_C = float(line[17])
-                e1_vec0 = float(line[18])
-                e1_vec1 = float(line[19])
-                e1_vec2 = float(line[20])
-                e1_vec = np.array([e1_vec0, e1_vec1, e1_vec2])
+                e0_vec0 = float(line[18])
+                e0_vec1 = float(line[19])
+                e0_vec2 = float(line[20])
+                e0_vec = np.array([e0_vec0, e0_vec1, e0_vec2])
                 tilt = float(line[21])
                 self._groups.append(LoadedHalo(self.pf, halo, size = size,
                     CoM = CoM,
                     max_dens_point = max_dens_point,
                     group_total_mass = group_total_mass, max_radius = max_radius,
                     bulk_vel = bulk_vel, rms_vel = rms_vel, fnames = fnames,
-                    mag_A = mag_A, mag_B = mag_B, mag_C = mag_C, e1_vec = e1_vec,
+                    mag_A = mag_A, mag_B = mag_B, mag_C = mag_C, e0_vec = e0_vec,
                     tilt = tilt))
             else:
                 mylog.error("I am unable to parse this line. Too many or too few items. %s" % orig)

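The corrected bookkeeping decomposes the ellipsoid orientation into three
angles: a rotation by t1 about z brings e0 into the x-z plane, a rotation
by t2 about y aligns it with the x axis, and the residual rotation of e1
about x is the tilt.  A small numerical check of the first two steps,
with a local right-handed rotation helper standing in for
get_rotation_matrix:

    import numpy as np

    def rot(theta, axis):
        # right-handed rotation about a coordinate axis (0=x, 1=y, 2=z)
        c, s = np.cos(theta), np.sin(theta)
        mats = {0: [[1, 0, 0], [0, c, -s], [0, s, c]],
                1: [[c, 0, s], [0, 1, 0], [-s, 0, c]],
                2: [[c, -s, 0], [s, c, 0], [0, 0, 1]]}
        return np.array(mats[axis])

    e0 = np.array([2., 1., 2.]) / 3.      # an arbitrary unit vector
    t1 = np.arctan(-e0[1] / e0[0])        # about z, into the x-z plane
    r1 = rot(t1, 2).dot(e0)
    t2 = np.arctan(r1[2] / r1[0])         # about y, onto the x axis
    r2 = rot(t2, 1).dot(r1)
    assert np.allclose(r2, [1., 0., 0.])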
diff -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 -r 21ffbfd2081d180d251540df7e3d986a15d117c7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2669,14 +2669,14 @@
         i = 0
         for grid in self._grids:
             pointI = self._get_point_indices(grid)
-            np = pointI[0].ravel().size
+            npoints = pointI[0].ravel().size
             if grid.has_key(field):
                 new_field = grid[field]
             else:
                 new_field = np.ones(grid.ActiveDimensions, dtype=dtype) * default_val
-            new_field[pointI] = self[field][i:i+np]
+            new_field[pointI] = self[field][i:i+npoints]
             grid[field] = new_field
-            i += np
+            i += npoints
 
     def _is_fully_enclosed(self, grid):
         return np.all(self._get_cut_mask)
@@ -3587,23 +3587,23 @@
         self._tilt = tilt
 
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = np.arctan(e0[1] / e0[0])
+        t1 = np.arctan(-e0[1] / e0[0])
         # rotate e0 by -t1
-        RZ = get_rotation_matrix(t1, (0,0,1)).transpose()
-        r1 = (e0 * RZ).sum(axis = 1)
+        RZ = get_rotation_matrix(t1, (0,0,1))
+        r1 = np.dot(RZ, e0)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = np.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(r1[2] / r1[0])
         """
         calculate the original e1
         given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis=1)
-        e1 = (e1 * RY).sum(axis=1)
-        e1 = (e1 * RZ).sum(axis=1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0))
+        RY = get_rotation_matrix(-t2,   (0, 1, 0))
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1))
+        e1 = np.dot(RX, (0,1,0))
+        e1 = np.dot(RY, e1)
+        e1 = np.dot(RZ, e1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1

diff -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 -r 21ffbfd2081d180d251540df7e3d986a15d117c7 yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -246,10 +246,8 @@
     for n in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs=n)
         pf.h
-        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-            np.array([0.1]*3))
-        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-            np.array([0.1]*3))
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
         # Store the original indices
         i1 = ell1['ID']
         i1.sort()
@@ -287,10 +285,8 @@
     for n in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs=n)
         pf.h
-        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-            np.array([0.1]*3))
-        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
-            np.array([0.1]*3))
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
         # Get indices of both.
         i1 = ell1['ID']
         i2 = ell2['ID']

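The updated calls reflect the ellipsoid constructor's signature: the last
argument is now a single tilt angle in radians rather than an array.  A
usage sketch mirroring the test above, on a fake dataset:

    import numpy as np
    from yt.testing import fake_random_pf

    pf = fake_random_pf(64)
    pf.h
    # center, semi-axes A, B, C, the e0 direction, and a scalar tilt
    ell = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
    print(ell['ID'].size)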
diff -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 -r 21ffbfd2081d180d251540df7e3d986a15d117c7 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -109,6 +109,9 @@
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=fontsize, fname=font_path)
         self._font_color = None
+        self._xlabel = None
+        self._ylabel = None
+        self._colorbarlabel = None
 
     @invalidate_plot
     def set_log(self, field, log):
@@ -475,3 +478,67 @@
             img = base64.b64encode(self.plots[field]._repr_png_())
             ret += '<img src="data:image/png;base64,%s"><br>' % img
         return ret
+
+    def set_xlabel(self, x_title, fontsize=18):
+        r"""
+        Allow the user to modify the X-axis title.
+        Defaults to the global value.  Fontsize defaults
+        to 18.
+
+        Parameters
+        ----------
+        x_title: str
+              The new string for the x-axis. This is a required argument.
+
+        fontsize: float
+              Fontsize for the x-axis title
+
+        >>> plot.set_xlabel("H2I Number Density (cm$^{-3}$)")
+
+        """
+        for f in self.plots:
+            self.plots[f].axes.xaxis.set_label_text(x_title, fontsize=fontsize)
+        self._xlabel = x_title
+
+    def set_ylabel(self, y_title, fontsize=18):
+        r"""
+        Allow the user to modify the Y-axis title.
+        Defaults to the global value.  Fontsize defaults
+        to 18.
+
+        Parameters
+        ----------
+        y_title: str
+              The new string for the y-axis. This is a required argument.
+        fontsize: float
+              Fontsize for the y-axis title
+
+        >>> plot.set_ylabel("Temperature (K)")
+
+        """
+        for f in self.plots:
+            self.plots[f].axes.yaxis.set_label_text(y_title, fontsize=fontsize)
+        self._ylabel = y_title
+
+    def set_colorbar_label(self, z_title, fontsize=18):
+        r"""
+        Allow the user to modify the colorbar label.
+        Defaults to the global value.  Fontsize defaults
+        to 18.
+
+        Parameters
+        ----------
+        z_title: str
+              The new string for the colorbar. This is a required argument.
+        fontsize: float
+              Fontsize for the colorbar label
+
+        >>> plot.set_colorbar_label("Enclosed Gas Mass ($M_{\odot}$)")
+
+        """
+        for f in self.plots:
+            self.plots[f].cax.yaxis.set_label_text(z_title, fontsize=fontsize)
+        self._colorbarlabel = z_title
+
+    def _get_axes_labels(self):
+        return(self._xlabel, self._ylabel, self._colorbarlabel)

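The three setters push a new label string onto every live matplotlib axes
object and cache it in _xlabel/_ylabel/_colorbarlabel so that a later
_setup_plots pass (see the profile_plotter change below) can re-apply it.
A usage sketch, assuming a PlotWindow-derived plot such as SlicePlot
(dataset path and field illustrative):

    from yt.mods import *

    pf = load('IsolatedGalaxy/galaxy0030/galaxy0030')
    slc = SlicePlot(pf, 'z', 'Density')
    slc.set_xlabel('x (kpc)')
    slc.set_ylabel('y (kpc)')
    slc.set_colorbar_label(r'Density (g cm$^{-3}$)')
    slc.save()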
diff -r 2ec77aca4ee81674bbc5aad688776b9e59940f37 -r 21ffbfd2081d180d251540df7e3d986a15d117c7 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -517,11 +517,10 @@
                          weight_field=None)
     >>> plot.save()
 
-    >>> # Change plot properties.
+    >>> # Change plot properties. 
     >>> plot.set_cmap("CellMassMsun", "jet")
     >>> plot.set_zlim("CellMassMsun", 1e8, 1e13)
     >>> plot.set_title("CellMassMsun", "This is a phase plot")
-    
     """
     x_log = None
     y_log = None
@@ -532,6 +531,7 @@
     _plot_valid = False
     _plot_type = 'Phase'
 
+
     def __init__(self, data_source, x_field, y_field, z_fields,
                  weight_field="CellMassMsun", x_bins=128, y_bins=128,
                  accumulation=False, fractional=False,
@@ -540,6 +540,14 @@
         self.z_log = {}
         self.z_title = {}
         self._initfinished = False
+        self._xlimits = [0,0]
+        self._ylimits = [0,0]
+        self._setxlims = False
+        self._setylims = False
+        self._plottext = ""
+        self._textxpos = 0.0
+        self._textypos = 0.0
+
 
         if profile is None:
             profile = create_profile(data_source,
@@ -562,10 +570,11 @@
         xfi = pf.field_info[field_x]
         yfi = pf.field_info[field_y]
         zfi = pf.field_info[field_z]
+       
         x_title = self.x_title or self._get_field_label(field_x, xfi)
         y_title = self.y_title or self._get_field_label(field_y, yfi)
         z_title = self.z_title.get(field_z, None) or \
-                    self._get_field_label(field_z, zfi)
+            self._get_field_label(field_z, zfi)
         return (x_title, y_title, z_title)
 
     def _get_field_label(self, field, field_info):
@@ -612,7 +621,16 @@
 
             size = (self.figure_size, self.figure_size)
             x_scale, y_scale, z_scale = self._get_field_log(f, self.profile)
+            x_label, y_label, z_label = self._get_axes_labels()
             x_title, y_title, z_title = self._get_field_title(f, self.profile)
+            #If the labels are set they take precedence
+            if x_label is not None:
+                x_title = x_label
+            if y_label is not None:
+                y_title = y_label
+            if z_label is not None:
+                z_title = z_label
+
             if f in self.plots:
                 zlim = [self.plots[f].zmin, self.plots[f].zmax]
             else:
@@ -627,16 +645,24 @@
                                          x_scale, y_scale, z_scale,
                                          self._colormaps[f], zlim, size, fp.get_size(),
                                          fig, axes, cax)
+
             self.plots[f].axes.xaxis.set_label_text(x_title)
             self.plots[f].axes.yaxis.set_label_text(y_title)
             self.plots[f].cax.yaxis.set_label_text(z_title)
+            if self._setxlims:
+                self.plots[f].axes.set_xlim(self._xlimits[0], self._xlimits[1])
+            if self._setylims:
+                self.plots[f].axes.set_ylim(self._ylimits[0], self._ylimits[1])
+           
+            self.plots[f].axes.text(self._textxpos, self._textypos, self._plottext,
+                                    fontproperties=self._font_properties)
             if z_scale == "log":
                 self._field_transform[f] = log_transform
             else:
                 self._field_transform[f] = linear_transform
             if f in self.plot_title:
                 self.plots[f].axes.set_title(self.plot_title[f])
-
+               
             if self._font_color is not None:
                 ax = self.plots[f].axes
                 cbax = self.plots[f].cb.ax
@@ -648,6 +674,90 @@
                     label.set_color(self._font_color)
         self._plot_valid = True
 
+
+    def set_xlim(self, xmin=None, xmax=None):
+        r"""
+        Sets the x-axis limits on the phase plot.
+        Defaults to None, leaving the axis unchanged.
+
+        Parameters
+        ----------
+        xmin: float
+              The minimum value on the x-axis
+        xmax: float
+              The maximum value on the x-axis
+
+        >>> plot.set_xlim(5e-21, 1e5)
+        """
+
+        for f in self.plots:
+            self.plots[f].axes.set_xlim(xmin, xmax)
+        self._setxlims = True
+        self._xlimits[0] = xmin
+        self._xlimits[1] = xmax
+
+    def set_ylim(self, ymin=None, ymax=None):
+        r"""
+        Sets the y-axis limits on the phase plot.
+        Defaults to None, leaving the axis unchanged.
+
+        Parameters
+        ----------
+        ymin: float
+              The minimum value on the y-axis
+        ymax: float
+              The maximum value on the y-axis
+
+        >>> plot.set_ylim(1e1, 1e5)
+
+        """
+       
+        for f in self.plots:
+            self.plots[f].axes.set_ylim(ymin, ymax)
+        self._setylims = True
+        self._ylimits[0] = ymin
+        self._ylimits[1] = ymax
+   
+    def add_text(self, text_str, xpos, ypos, fontsize=18, **kwargs):
+        r"""
+        Allow the user to insert text onto the plot.
+        The x-position and y-position must be given, as well as the text string.
+        Adds text_str to the plot at location xpos, ypos in data coordinates
+        (see example below).  Fontsize defaults to 18.
+        
+        Parameters
+        ----------
+        text_str: str
+              The text to insert onto the plot. Required argument. 
+        xpos: float
+              Position on plot in x-coordinates. Required argument. 
+        ypos: float
+              Position on plot in y-coordinates. Required argument. 
+        fontsize: float
+              Fontsize for the text (defaults to 18)
+
+        >>>  plot.text(1e-15, 5e4, "Hello YT")
+
+        """
+        for f in self.profile.field_data:
+            if f in self.plots and self.plots[f].figure is not None:
+                self.plots[f].axes.text(xpos, ypos, text_str,
+                                        fontproperties=self._font_properties)
+        self._plottext = text_str
+        self._textxpos = xpos
+        self._textypos = ypos
+
     def save(self, name=None, mpl_kwargs=None):
         r"""
         Saves a 2d profile plot.
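
A quick usage sketch for the three methods added above (editorial example,
not part of the changeset; the sample dataset name is assumed):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()
    plot = yt.PhasePlot(ad, "density", "temperature", "cell_mass")

    # the new setters cache their values on the plot object, so the limits
    # and the text survive the _setup_plots() call triggered by save()
    plot.set_xlim(5e-21, 1e5)
    plot.set_ylim(1e1, 1e5)
    plot.add_text("Hello YT", 1e-15, 5e4)
    plot.save("phase_with_text")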


https://bitbucket.org/yt_analysis/yt/commits/2ac4060fa26a/
Changeset:   2ac4060fa26a
Branch:      yt-3.0
User:        atmyers
Date:        2014-08-06 01:15:03+00:00
Summary:     merging
Affected #:  10 files

diff -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c yt/frontends/charm/api.py
--- /dev/null
+++ b/yt/frontends/charm/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.charm
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      CharmGrid, \
+      CharmHierarchy, \
+      CharmStaticOutput
+
+from .fields import \
+      CharmFieldInfo, \
+      add_charm_field
+
+from .io import \
+      IOHandlerCharmHDF5

diff -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c yt/frontends/charm/data_structures.py
--- /dev/null
+++ b/yt/frontends/charm/data_structures.py
@@ -0,0 +1,341 @@
+"""
+Data structures for Charm.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import re
+import os
+import weakref
+import numpy as np
+
+from collections import \
+     defaultdict
+from string import \
+     strip, \
+     rstrip
+from stat import \
+     ST_CTIME
+
+from .definitions import \
+     charm2enzoDict, \
+     yt2charmFieldsDict, \
+     parameterDict
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+     AMRGridPatch
+from yt.data_objects.hierarchy import \
+     AMRHierarchy
+from yt.data_objects.static_output import \
+     StaticOutput
+from yt.utilities.definitions import \
+     mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     parallel_root_only
+from yt.utilities.io_handler import \
+    io_registry
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import \
+    CharmFieldInfo, Charm2DFieldInfo, Charm1DFieldInfo, \
+    add_charm_field, add_charm_2d_field, add_charm_1d_field, \
+    KnownCharmFields
+
+class CharmGrid(AMRGridPatch):
+    _id_offset = 0
+    __slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level, start, stop):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.ActiveDimensions = stop - start + 1
+
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+
+        """
+        if self.start_index is not None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return np.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+class CharmHierarchy(AMRHierarchy):
+
+    grid = CharmGrid
+    _data_file = None
+
+    def __init__(self,pf,data_style='charm_hdf5'):
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.data_style = data_style
+
+        if pf.dimensionality == 1:
+            self.data_style = "charm1d_hdf5"
+        if pf.dimensionality == 2:
+            self.data_style = "charm2d_hdf5"
+
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = os.path.abspath(
+            self.parameter_file.parameter_filename)
+        self.directory = pf.fullpath
+        self._handle = pf._handle
+
+        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
+        AMRHierarchy.__init__(self,pf,data_style)
+        self._read_particles()
+
+    def _read_particles(self):
+        
+        self.num_particles = 0
+        particles_per_grid = []
+        for key, val in self._handle.items():
+            if key.startswith('level'):
+                level_particles = val['particles:offsets'][:]
+                self.num_particles += level_particles.sum()
+                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
+
+        for i, grid in enumerate(self.grids):
+            self.grids[i].NumberOfParticles = particles_per_grid[i]
+            self.grid_particle_count[i] = particles_per_grid[i]
+
+        assert(self.num_particles == self.grid_particle_count.sum())
+
+    def _detect_fields(self):
+        self.field_list = []
+        for key, val in self._handle.attrs.items():
+            if key.startswith("component"):
+                self.field_list.append(val)
+          
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._handle[lev]['Processors'].len()
+
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.parameter_file.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class CharmStaticOutput(StaticOutput):
+    _hierarchy_class = CharmHierarchy
+    _fieldinfo_fallback = CharmFieldInfo
+    _fieldinfo_known = KnownCharmFields
+
+    def __init__(self, filename, data_style='charm_hdf5',
+                 storage_filename = None, ini_filename = None):
+        self._handle = h5py.File(filename,'r')
+        self.current_time = self._handle['level_0'].attrs['time']
+        self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
+        StaticOutput.__init__(self,filename,data_style)
+        self.storage_filename = storage_filename
+        self.cosmological_simulation = False
+
+        # These are parameters that I very much wish to get rid of.
+        self.parameters["HydroMethod"] = 'charm' # always PPM DE
+        self.parameters["DualEnergyFormalism"] = 0 
+        self.parameters["EOSType"] = -1 # default
+
+    def __del__(self):
+        self._handle.close()
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        seconds = 1 #self["Time"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = seconds / sec_conversion[unit]
+        for key in yt2charmFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    def _parse_parameter_file(self):
+        
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_left_edge = self.__calc_left_edge()
+        self.domain_right_edge = self.__calc_right_edge()
+        self.domain_dimensions = self.__calc_domain_dimensions()
+
+        if self.dimensionality == 1:
+            self._fieldinfo_fallback = Charm1DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self._fieldinfo_fallback = Charm2DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+        
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self.periodicity = (True,) * self.dimensionality
+
+    def __calc_left_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        return LE
+
+    def __calc_right_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return RE
+
+    def __calc_domain_dimensions(self):
+        fileh = self._handle
+        D = self.dimensionality
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return R_index - L_index
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = "Charm_global" in fileh["/"]
+            fileh.close()
+            return valid
+        except:
+            pass
+        return False
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
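
A standalone sketch of the prob_domain arithmetic used in __calc_left_edge,
__calc_right_edge, and __calc_domain_dimensions above (the 2*D integer
layout lo_i..lo_k, hi_i..hi_k and the sample numbers are assumed):

    import numpy as np

    def domain_edges(prob_domain, dx0, D):
        idx = np.array(list(prob_domain))
        left = dx0 * idx[0:D]          # physical left edge = dx * lo index
        right = dx0 * (idx[D:] + 1)    # hi index is inclusive, hence the +1
        return left, right

    # a 2D, 64x32-cell domain with dx = 1/64 on level 0:
    left, right = domain_edges([0, 0, 63, 31], 1.0 / 64, 2)
    print(left, right)   # [ 0.  0.] [ 1.   0.5]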

diff -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c yt/frontends/charm/definitions.py
--- /dev/null
+++ b/yt/frontends/charm/definitions.py
@@ -0,0 +1,54 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                                 }
+
+charm2enzoDict = {"GAMMA": "Gamma",
+                  "Ref_ratio": "RefineBy"
+                                    }
+
+yt2charmFieldsDict = {}
+charm2ytFieldsDict = {}
+
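
parameterDict maps Enzo-style parameter names to the callables used to cast
their raw string values. A hypothetical parse step showing the intended use
(the changeset itself only defines the table):

    parameterDict = {"Gamma": float, "RefineBy": int, "MetaDataString": str}

    def cast_parameter(name, raw_value):
        # unknown parameters fall back to plain strings
        return parameterDict.get(name, str)(raw_value)

    print(cast_parameter("Gamma", "1.6667"))   # 1.6667
    print(cast_parameter("RefineBy", "2"))     # 2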

diff -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c yt/frontends/charm/fields.py
--- /dev/null
+++ b/yt/frontends/charm/fields.py
@@ -0,0 +1,150 @@
+"""
+Charm-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+import numpy as np
+
+CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = CharmFieldInfo.add_field
+
+KnownCharmFields = FieldInfoContainer()
+add_charm_field = KnownCharmFields.add_field
+
+add_charm_field("potential", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("potential")],
+                units=r"")
+
+add_charm_field("density", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("density")],
+                units=r"")
+
+add_charm_field("gravitational_field_x", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_x")],
+                units=r"")
+
+add_charm_field("gravitational_field_y", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_y")],
+                units=r"")
+
+add_charm_field("gravitational_field_z", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_z")],
+                units=r"")
+
+def _Density(field, data):
+    return data["density"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+        
+    return _Particles
+
+_particle_field_list = ["mass",
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "velocity_x",
+                        "velocity_y",
+                        "velocity_z",
+                        "acceleration_x",
+                        "acceleration_y",
+                        "acceleration_z"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)
+
+#do overrides for 2D
+
+Charm2DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_2d_field = Charm2DFieldInfo.add_field
+
+def _gravitational_field_z(field, data):
+    return np.zeros(data['gravitational_field_x'].shape,
+                    dtype='float64')
+add_charm_2d_field("gravitational_field_z", function=_gravitational_field_z)
+
+def _particle_position_z(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+add_charm_2d_field("particle_position_z", function=_particle_position_z)
+
+def _particle_velocity_z(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+add_charm_2d_field("particle_velocity_z", function=_particle_velocity_z)
+
+def _particle_acceleration_z(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+add_charm_2d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+#do overrides for 1D
+
+Charm1DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_1d_field = Charm1DFieldInfo.add_field
+
+def _gravitational_field_y(field, data):
+    return np.zeros(data['gravitational_field_y'].shape,
+                    dtype='float64')
+
+def _particle_position_y(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+
+def _particle_velocity_y(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+
+def _particle_acceleration_y(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+
+add_charm_1d_field("gravitational_field_z", function=_gravitational_field_z)
+add_charm_1d_field("gravitational_field_y", function=_gravitational_field_y)
+
+add_charm_1d_field("particle_position_z", function=_particle_position_z)
+add_charm_1d_field("particle_velocity_z", function=_particle_velocity_z)
+add_charm_1d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+add_charm_1d_field("particle_position_y", function=_particle_position_y)
+add_charm_1d_field("particle_velocity_y", function=_particle_velocity_y)
+add_charm_1d_field("particle_acceleration_y", function=_particle_acceleration_y)
\ No newline at end of file
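
One detail of fields.py worth flagging: particle_func passes the field name
in as a function argument, so each generated _Particles closure freezes its
own copy of p_field. Closing over the loop variable pf directly would hit
Python's late-binding behavior and read every field as the last list entry.
A minimal demonstration:

    fields = ["mass", "position_x", "position_y"]

    late_bound = [lambda: pf for pf in fields]
    frozen = [(lambda name: (lambda: name))(pf) for pf in fields]

    print([f() for f in late_bound])  # ['position_y', 'position_y', 'position_y']
    print([f() for f in frozen])      # ['mass', 'position_x', 'position_y']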

diff -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c yt/frontends/charm/io.py
--- /dev/null
+++ b/yt/frontends/charm/io.py
@@ -0,0 +1,127 @@
+"""
+The data-file handling functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import h5py
+import os
+import re
+import numpy as np
+
+from yt.utilities.io_handler import \
+           BaseIOHandler
+
+class IOHandlerCharmHDF5(BaseIOHandler):
+    _data_style = "charm_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'position_z': 2,
+                                      'velocity_x': 3,
+                                      'velocity_y': 4,
+                                      'velocity_z': 5,
+                                      'acceleration_x': 6,
+                                      'acceleration_y': 7,
+                                      'acceleration_z': 8,
+                                      'mass': 9}
+
+    _field_dict = None
+    @property
+    def field_dict(self):
+        if self._field_dict is not None:
+            return self._field_dict
+        field_dict = {}
+        for key, val in self._handle.attrs.items():
+            if key.startswith('component_'):
+                comp_number = int(re.match('component_(\d+)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._field_dict = field_dict
+        return self._field_dict
+        
+    def _read_field_names(self, grid):
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        # field names are the values of the ncomp attributes before the last entry
+        return [c[1] for c in self._handle['/'].attrs.items()[-ncomp-1:-1]]
+    
+    def _read_data(self,grid,field):
+
+        lstring = 'level_%i' % grid.Level
+        lev = self._handle[lstring]
+        dims = grid.ActiveDimensions
+        boxsize = dims.prod()
+        
+        grid_offset = lev[self._offset_string][grid._level_id]
+        start = grid_offset+self.field_dict[field]*boxsize
+        stop = start + boxsize
+        data = lev[self._data_string][start:stop]
+        
+        return data.reshape(dims, order='F')
+
+    def _read_particles(self, grid, name):
+
+        field_index = self._particle_field_index[name]
+        lev = 'level_%s' % grid.Level
+
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
+        items_per_particle = len(self._particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+
+        # convert between the global grid id and the id on this level            
+        grid_levels = np.array([g.Level for g in self.pf.h.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
+        return data[field_index::items_per_particle]
+
+class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm2d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'velocity_x': 2,
+                                      'velocity_y': 3,
+                                      'acceleration_x': 4,
+                                      'acceleration_y': 5,
+                                      'mass': 6}
+
+
+class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm1d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'velocity_x': 1,
+                                      'acceleration_x': 2,
+                                      'mass': 3}
\ No newline at end of file
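
The _read_particles implementation above treats 'particles:data' as a flat,
interleaved array with items_per_particle values per particle, so a single
field is a strided slice. A toy version of that indexing (shapes assumed):

    import numpy as np

    items_per_particle = 4   # 1D layout: position_x, velocity_x, acceleration_x, mass
    data = np.arange(12.0)   # three fake particles, four fields each
    mass_index = 3           # matches _particle_field_index['mass']

    print(data[mass_index::items_per_particle])   # [  3.   7.  11.]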

diff -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c yt/frontends/charm/setup.py
--- /dev/null
+++ b/yt/frontends/charm/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('charm', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -467,8 +467,9 @@
         if not pluto_ini_file_exists:
             try:
                 fileh = h5py.File(args[0],'r')
-                valid = "Chombo_global" in fileh["/"]
                 valid = 'CeilVA_mass' in fileh.attrs.keys()
+                valid = "Chombo_global" in fileh["/"] and "Charm_global" not in fileh["/"]
+                valid = valid and 'CeilVA_mass' in fileh.attrs.keys()
                 fileh.close()
                 return valid
             except:
@@ -521,4 +522,4 @@
                 return valid
             except:
                 pass
-        return False
\ No newline at end of file
+        return False
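
Net effect of the _is_valid changes: Chombo-family files are disambiguated
by which top-level HDF5 groups exist, with Charm files claimed first via
their 'Charm_global' group. A sketch of that test in isolation (the file
layout is assumed):

    import h5py

    def looks_like_charm(filename):
        try:
            with h5py.File(filename, 'r') as fileh:
                return "Charm_global" in fileh["/"]
        except (IOError, OSError):
            return False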

diff -r 1434d4642694c705a4bf6c1b3b35bf0b3d7ef02d -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -26,6 +26,8 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")
+    config.add_subpackage("pluto")
+    config.add_subpackage("charm")
     config.add_subpackage("flash/tests")
     config.add_subpackage("enzo/tests")
     config.add_subpackage("stream/tests")


https://bitbucket.org/yt_analysis/yt/commits/a6019f1e68e2/
Changeset:   a6019f1e68e2
Branch:      yt
User:        atmyers
Date:        2014-08-06 01:18:05+00:00
Summary:     merging
Affected #:  18 files

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -195,7 +195,7 @@
 
     def get_label(self, projected=False):
         """
-        Return a data label for the given field, inluding units.
+        Return a data label for the given field, including units.
         """
         name = self.name[1]
         if self.display_name is not None:

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/charm/api.py
--- /dev/null
+++ b/yt/frontends/charm/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.charm
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      CharmGrid, \
+      CharmHierarchy, \
+      CharmStaticOutput
+
+from .fields import \
+      CharmFieldInfo, \
+      add_charm_field
+
+from .io import \
+      IOHandlerCharmHDF5

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/charm/data_structures.py
--- /dev/null
+++ b/yt/frontends/charm/data_structures.py
@@ -0,0 +1,341 @@
+"""
+Data structures for Charm.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import re
+import os
+import weakref
+import numpy as np
+
+from collections import \
+     defaultdict
+from string import \
+     strip, \
+     rstrip
+from stat import \
+     ST_CTIME
+
+from .definitions import \
+     charm2enzoDict, \
+     yt2charmFieldsDict, \
+     parameterDict
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+     AMRGridPatch
+from yt.data_objects.hierarchy import \
+     AMRHierarchy
+from yt.data_objects.static_output import \
+     StaticOutput
+from yt.utilities.definitions import \
+     mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     parallel_root_only
+from yt.utilities.io_handler import \
+    io_registry
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import \
+    CharmFieldInfo, Charm2DFieldInfo, Charm1DFieldInfo, \
+    add_charm_field, add_charm_2d_field, add_charm_1d_field, \
+    KnownCharmFields
+
+class CharmGrid(AMRGridPatch):
+    _id_offset = 0
+    __slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level, start, stop):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.ActiveDimensions = stop - start + 1
+
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+
+        """
+        if self.start_index is not None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return np.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+class CharmHierarchy(AMRHierarchy):
+
+    grid = CharmGrid
+    _data_file = None
+
+    def __init__(self,pf,data_style='charm_hdf5'):
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.data_style = data_style
+
+        if pf.dimensionality == 1:
+            self.data_style = "charm1d_hdf5"
+        if pf.dimensionality == 2:
+            self.data_style = "charm2d_hdf5"
+
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = os.path.abspath(
+            self.parameter_file.parameter_filename)
+        self.directory = pf.fullpath
+        self._handle = pf._handle
+
+        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
+        AMRHierarchy.__init__(self,pf,data_style)
+        self._read_particles()
+
+    def _read_particles(self):
+        
+        self.num_particles = 0
+        particles_per_grid = []
+        for key, val in self._handle.items():
+            if key.startswith('level'):
+                level_particles = val['particles:offsets'][:]
+                self.num_particles += level_particles.sum()
+                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
+
+        for i, grid in enumerate(self.grids):
+            self.grids[i].NumberOfParticles = particles_per_grid[i]
+            self.grid_particle_count[i] = particles_per_grid[i]
+
+        assert(self.num_particles == self.grid_particle_count.sum())
+
+    def _detect_fields(self):
+        self.field_list = []
+        for key, val in self._handle.attrs.items():
+            if key.startswith("component"):
+                self.field_list.append(val)
+          
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._handle[lev]['Processors'].len()
+
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.parameter_file.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class CharmStaticOutput(StaticOutput):
+    _hierarchy_class = CharmHierarchy
+    _fieldinfo_fallback = CharmFieldInfo
+    _fieldinfo_known = KnownCharmFields
+
+    def __init__(self, filename, data_style='charm_hdf5',
+                 storage_filename = None, ini_filename = None):
+        self._handle = h5py.File(filename,'r')
+        self.current_time = self._handle['level_0'].attrs['time']
+        self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
+        StaticOutput.__init__(self,filename,data_style)
+        self.storage_filename = storage_filename
+        self.cosmological_simulation = False
+
+        # These are parameters that I very much wish to get rid of.
+        self.parameters["HydroMethod"] = 'charm' # always PPM DE
+        self.parameters["DualEnergyFormalism"] = 0 
+        self.parameters["EOSType"] = -1 # default
+
+    def __del__(self):
+        self._handle.close()
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        seconds = 1 #self["Time"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = seconds / sec_conversion[unit]
+        for key in yt2charmFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    def _parse_parameter_file(self):
+        
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_left_edge = self.__calc_left_edge()
+        self.domain_right_edge = self.__calc_right_edge()
+        self.domain_dimensions = self.__calc_domain_dimensions()
+
+        if self.dimensionality == 1:
+            self._fieldinfo_fallback = Charm1DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self._fieldinfo_fallback = Charm2DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+        
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self.periodicity = (True,) * self.dimensionality
+
+    def __calc_left_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        return LE
+
+    def __calc_right_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return RE
+
+    def __calc_domain_dimensions(self):
+        fileh = self._handle
+        D = self.dimensionality
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return R_index - L_index
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = "Charm_global" in fileh["/"]
+            fileh.close()
+            return valid
+        except:
+            pass
+        return False
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
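
For reference, the recursion in get_global_startindex maps a grid's physical
left edge to integer cell indices on its own level. A worked two-level case
(refine_by = 2 and the grid positions are assumed):

    import numpy as np

    domain_left = np.zeros(3)
    root_dx = np.ones(3) / 8.0        # level 0: 8 cells across the unit box
    root_start = np.zeros(3, dtype='int64')

    child_left = np.array([0.25, 0.0, 0.0])
    offset = np.rint((child_left - domain_left) / root_dx)  # offset in parent cells
    child_start = ((root_start + offset) * 2).astype('int64')
    print(child_start)   # [4 0 0] on the refined (dx = 1/16) level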

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/charm/definitions.py
--- /dev/null
+++ b/yt/frontends/charm/definitions.py
@@ -0,0 +1,54 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                                 }
+
+charm2enzoDict = {"GAMMA": "Gamma",
+                  "Ref_ratio": "RefineBy"
+                                    }
+
+yt2charmFieldsDict = {}
+charm2ytFieldsDict = {}
+

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/charm/fields.py
--- /dev/null
+++ b/yt/frontends/charm/fields.py
@@ -0,0 +1,150 @@
+"""
+Charm-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+import numpy as np
+
+CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = CharmFieldInfo.add_field
+
+KnownCharmFields = FieldInfoContainer()
+add_charm_field = KnownCharmFields.add_field
+
+add_charm_field("potential", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("potential")],
+                units=r"")
+
+add_charm_field("density", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("density")],
+                units=r"")
+
+add_charm_field("gravitational_field_x", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_x")],
+                units=r"")
+
+add_charm_field("gravitational_field_y", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_y")],
+                units=r"")
+
+add_charm_field("gravitational_field_z", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_z")],
+                units=r"")
+
+def _Density(field, data):
+    return data["density"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+        
+    return _Particles
+
+_particle_field_list = ["mass",
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "velocity_x",
+                        "velocity_y",
+                        "velocity_z",
+                        "acceleration_x",
+                        "acceleration_y",
+                        "acceleration_z"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)
+
+#do overrides for 2D
+
+Charm2DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_2d_field = Charm2DFieldInfo.add_field
+
+def _gravitational_field_z(field, data):
+    return np.zeros(data['gravitational_field_x'].shape,
+                    dtype='float64')
+add_charm_2d_field("gravitational_field_z", function=_gravitational_field_z)
+
+def _particle_position_z(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+add_charm_2d_field("particle_position_z", function=_particle_position_z)
+
+def _particle_velocity_z(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+add_charm_2d_field("particle_velocity_z", function=_particle_velocity_z)
+
+def _particle_acceleration_z(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+add_charm_2d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+#do overrides for 1D
+
+Charm1DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_1d_field = Charm1DFieldInfo.add_field
+
+def _gravitational_field_y(field, data):
+    return np.zeros(data['gravitational_field_y'].shape,
+                    dtype='float64')
+
+def _particle_position_y(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+
+def _particle_velocity_y(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+
+def _particle_acceleration_y(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+
+add_charm_1d_field("gravitational_field_z", function=_gravitational_field_z)
+add_charm_1d_field("gravitational_field_y", function=_gravitational_field_y)
+
+add_charm_1d_field("particle_position_z", function=_particle_position_z)
+add_charm_1d_field("particle_velocity_z", function=_particle_velocity_z)
+add_charm_1d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+add_charm_1d_field("particle_position_y", function=_particle_position_y)
+add_charm_1d_field("particle_velocity_y", function=_particle_velocity_y)
+add_charm_1d_field("particle_acceleration_y", function=_particle_acceleration_y)
\ No newline at end of file

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/charm/io.py
--- /dev/null
+++ b/yt/frontends/charm/io.py
@@ -0,0 +1,127 @@
+"""
+The data-file handling functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import h5py
+import os
+import re
+import numpy as np
+
+from yt.utilities.io_handler import \
+           BaseIOHandler
+
+class IOHandlerCharmHDF5(BaseIOHandler):
+    _data_style = "charm_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'position_z': 2,
+                                      'velocity_x': 3,
+                                      'velocity_y': 4,
+                                      'velocity_z': 5,
+                                      'acceleration_x': 6,
+                                      'acceleration_y': 7,
+                                      'acceleration_z': 8,
+                                      'mass': 9}
+
+    _field_dict = None
+    @property
+    def field_dict(self):
+        if self._field_dict is not None:
+            return self._field_dict
+        field_dict = {}
+        for key, val in self._handle.attrs.items():
+            if key.startswith('component_'):
+                comp_number = int(re.match('component_(\d+)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._field_dict = field_dict
+        return self._field_dict
+        
+    def _read_field_names(self, grid):
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        # field names are the values of the ncomp attributes before the last entry
+        return [c[1] for c in self._handle['/'].attrs.items()[-ncomp-1:-1]]
+    
+    def _read_data(self,grid,field):
+
+        lstring = 'level_%i' % grid.Level
+        lev = self._handle[lstring]
+        dims = grid.ActiveDimensions
+        boxsize = dims.prod()
+        
+        grid_offset = lev[self._offset_string][grid._level_id]
+        start = grid_offset+self.field_dict[field]*boxsize
+        stop = start + boxsize
+        data = lev[self._data_string][start:stop]
+        
+        return data.reshape(dims, order='F')
+
+    def _read_particles(self, grid, name):
+
+        field_index = self._particle_field_index[name]
+        lev = 'level_%s' % grid.Level
+
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
+        items_per_particle = len(self._particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+
+        # convert between the global grid id and the id on this level            
+        grid_levels = np.array([g.Level for g in self.pf.h.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
+        return data[field_index::items_per_particle]
+
+class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm2d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'velocity_x': 2,
+                                      'velocity_y': 3,
+                                      'acceleration_x': 4,
+                                      'acceleration_y': 5,
+                                      'mass': 6}
+
+
+class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm1d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'velocity_x': 1,
+                                      'acceleration_x': 2,
+                                      'mass': 3}
\ No newline at end of file
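
For reference, a minimal standalone sketch of the interleaved particle
layout these handlers assume (the numbers are illustrative, not taken from
any real dataset): all fields of one particle are stored contiguously,
grids are concatenated per level, and a single field is recovered by
striding through a grid's chunk of the flat array.

    import numpy as np

    # Hypothetical layout: 3 items per particle (x, vx, mass) and two
    # grids holding 2 and 3 particles, flattened into one array.
    items_per_particle = 3
    particles_per_grid = np.array([2, 3])
    data = np.arange(5 * items_per_particle, dtype=np.float64)

    # Global offsets into the flat array, one entry per grid boundary.
    offsets = items_per_particle * np.cumsum(particles_per_grid)
    offsets = np.append(np.array([0]), offsets).astype(np.int64)

    lo, hi = 1, 2                      # the second grid on this level
    chunk = data[offsets[lo]:offsets[hi]]
    vx = chunk[1::items_per_particle]  # field index 1, every 3rd item
    print(vx)                          # -> [  7.  10.  13.]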

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/charm/setup.py
--- /dev/null
+++ b/yt/frontends/charm/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('charm', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -18,10 +18,16 @@
       ChomboHierarchy, \
       ChomboDataset, \
       Orion2Hierarchy, \
-      Orion2Dataset
+      Orion2Dataset, \
+      ChomboPICHierarchy, \
+      ChomboPICDataset
 
 from .fields import \
-      ChomboFieldInfo
+      ChomboFieldInfo, \
+      Orion2FieldInfo, \
+      ChomboPICFieldInfo1D, \
+      ChomboPICFieldInfo2D, \
+      ChomboPICFieldInfo3D
 
 from .io import \
       IOHandlerChomboHDF5

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -42,7 +42,8 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo, Orion2FieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D 
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -347,6 +348,7 @@
                 valid = "Chombo_global" in fileh["/"]
                 # ORION2 simulations should always have this:
                 valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
+                valid = valid and not ('Charm_global' in fileh.keys())
                 fileh.close()
                 return valid
             except:
@@ -465,11 +467,59 @@
         if not pluto_ini_file_exists:
             try:
                 fileh = h5py.File(args[0],'r')
-                valid = "Chombo_global" in fileh["/"]
-                valid = 'CeilVA_mass' in fileh.attrs.keys()
+                valid = "Chombo_global" in fileh["/"] and "Charm_global" not in fileh["/"]
+                valid = valid and 'CeilVA_mass' in fileh.attrs.keys()
                 fileh.close()
                 return valid
             except:
                 pass
         return False
 
+class ChomboPICHierarchy(ChomboHierarchy):
+
+    def __init__(self, pf, dataset_type="chombo_hdf5"):
+        ChomboHierarchy.__init__(self, pf, dataset_type)
+
+class ChomboPICDataset(ChomboDataset):
+
+    _index_class = ChomboPICHierarchy
+    _field_info_class = ChomboPICFieldInfo3D
+
+    def __init__(self, filename, dataset_type='chombo_hdf5',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
+
+        if self.dimensionality == 1:
+            self._field_info_class = ChomboPICFieldInfo1D
+
+        if self.dimensionality == 2:
+            self._field_info_class = ChomboPICFieldInfo2D
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+        
+        if orion2_ini_file_exists:
+            return True
+
+        if not pluto_ini_file_exists:
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Charm_global" in fileh["/"]
+                fileh.close()
+                return valid
+            except:
+                pass
+        return False
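
The detection above keys off marker groups in the HDF5 root; a hedged
standalone sketch of the same idea, using the file and ini names from the
diff but stripped of yt's registration machinery:

    import os
    import h5py

    def looks_like_charm(filename):
        # A ChomboPIC/Charm file is HDF5 with a 'Charm_global' group at
        # the root and no pluto.ini sitting next to it on disk.
        dir_name = os.path.dirname(os.path.abspath(filename))
        if os.path.isfile(os.path.join(dir_name, "pluto.ini")):
            return False
        try:
            with h5py.File(filename, "r") as fileh:
                return "Charm_global" in fileh["/"]
        except (IOError, OSError):
            return False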

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -88,3 +88,100 @@
                        units = "erg/cm**3")
         self.add_field("temperature", function=_temperature,
                        units="K")
+
+class ChomboPICFieldInfo3D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+        ("gravitational_field_z", ("code_length / code_time**2", ["gravitational-field-z"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+        ("particle_velocity_z", ("code_length / code_time", [], None)),
+    )
+
+def _dummy_position(field, data):
+    return 0.5*np.ones_like(data['particle_position_x'])
+
+def _dummy_velocity(field, data):
+    return np.zeros_like(data['particle_velocity_x'])
+
+def _dummy_field(field, data):
+    return 0.0 * data['gravitational_field_x']
+
+fluid_field_types = ['chombo', 'gas']
+particle_field_types = ['io', 'all']
+
+class ChomboPICFieldInfo2D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, pf, field_list):
+        super(ChomboPICFieldInfo2D, self).__init__(pf, field_list)
+
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+        
+        for ptype in particle_field_types:                
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+
+class ChomboPICFieldInfo1D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, pf, field_list):
+        super(ChomboPICFieldInfo1D, self).__init__(pf, field_list)
+        
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_y'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                    units = "code_length / code_time**2")
+
+        for ptype in particle_field_types:
+            self.add_field((ptype, "particle_position_y"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_velocity_y"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
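
The dummy callbacks above only have to produce arrays shaped like a field
that really exists: padded positions sit at the domain center (0.5 in code
units) and padded velocities and field components are zero. A tiny numpy
illustration of that shape contract:

    import numpy as np

    particle_position_x = np.random.random(10)  # stand-in for real data

    dummy_position_z = 0.5 * np.ones_like(particle_position_x)
    dummy_velocity_z = np.zeros_like(particle_position_x)
    assert dummy_position_z.shape == particle_position_x.shape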

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -26,10 +26,22 @@
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, ds, *args, **kwargs):
-        BaseIOHandler.__init__(self, ds, *args, **kwargs)
-        self.ds = ds
-        self._handle = ds._handle
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self._read_ghost_info()
+
+    def _read_ghost_info(self):
+        try:
+            self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
+            # pad with zeros if the dataset is low-dimensional
+            self.ghost += (3 - self.dim)*(0,)
+            self.ghost = np.array(self.ghost)
+        except KeyError:
+            # assume zero ghost cells on every axis if 'outputGhost' is absent
+            self.ghost = np.zeros(3, dtype='int64')
 
     _field_dict = None
     @property
@@ -62,18 +74,20 @@
         fns = [c[1] for c in f.attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
-
         lstring = 'level_%i' % grid.Level
         lev = self._handle[lstring]
         dims = grid.ActiveDimensions
-        boxsize = dims.prod()
+        shape = grid.ActiveDimensions + 2*self.ghost
+        boxsize = shape.prod()
         
         grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
+        data_no_ghost = data.reshape(shape, order='F')
+        ghost_slice = [slice(g, d+g, None) for g, d in zip(self.ghost, grid.ActiveDimensions)]
+        ghost_slice = ghost_slice[0:self.dim]
+        return data_no_ghost[ghost_slice]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -83,16 +97,8 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             grid = chunks[0].objs[0]
-            lstring = 'level_%i' % grid.Level
-            lev = self._handle[lstring]
-            grid_offset = lev[self._offset_string][grid._level_id]
-            boxsize = grid.ActiveDimensions.prod()
             for ftype, fname in fields:
-                start = grid_offset+self.field_dict[fname]*boxsize
-                stop = start + boxsize
-                data = lev[self._data_string][start:stop]
-                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
-                                        order='F')
+                rv[ftype, fname] = self._read_data(grid, fname)
             return rv
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
@@ -108,16 +114,10 @@
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
-                lstring = 'level_%i' % g.Level
-                lev = self._handle[lstring]
-                grid_offset = lev[self._offset_string][g._level_id]
-                boxsize = g.ActiveDimensions.prod()
                 nd = 0
                 for field in fields:
-                    start = grid_offset+self.field_dict[fname]*boxsize
-                    stop = start + boxsize
-                    data = lev[self._data_string][start:stop]
-                    data = data.reshape(g.ActiveDimensions, order='F')
+                    ftype, fname = field
+                    data = self._read_data(g, fname)
                     nd = g.select(selector, data, rv[field], ind) # caches
                 ind += nd
         return rv
@@ -178,20 +178,24 @@
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, ds, *args, **kwargs):
-        BaseIOHandler.__init__(self, ds, *args, **kwargs)
-        self.ds = ds
-        self._handle = ds._handle
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self.dim = 2
+        self._read_ghost_info()
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, ds, *args, **kwargs):
-        BaseIOHandler.__init__(self, ds, *args, **kwargs)
-        self.ds = ds
-        self._handle = ds._handle   
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self.dim = 1
+        self._handle = pf._handle   
+        self._read_ghost_info()
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -45,10 +45,10 @@
 _zp_fields = ("rhs", "phi", "gravitational_field_x",
               "gravitational_field_y")
 zp = "ZeldovichPancake/plt32.2d.hdf5"
-@requires_ds(zp)
+@requires_pf(zp)
 def test_zp():
-    ds = data_dir_load(zp)
-    yield assert_equal, str(ds), "plt32.2d.hdf5"
+    pf = data_dir_load(zp)
+    yield assert_equal, str(pf), "plt32.2d.hdf5"
     for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
         test_zp.__name__ = test.description
         yield test

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -26,6 +26,8 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")
+    config.add_subpackage("pluto")
+    config.add_subpackage("charm")
     config.add_subpackage("flash/tests")
     config.add_subpackage("enzo/tests")
     config.add_subpackage("stream/tests")

diff -r 2289aecc35992ca9153c06c79fb57be6ecb61bde -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 yt/visualization/particle_plotter.py
--- /dev/null
+++ b/yt/visualization/particle_plotter.py
@@ -0,0 +1,496 @@
+"""
+This is a simple mechanism for interfacing with Particle plots
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+import __builtin__
+import base64
+import os
+import types
+
+from functools import wraps
+from itertools import izip
+import matplotlib
+import numpy as np
+import cStringIO
+
+from .base_plot_types import ImagePlotMPL
+from .plot_container import \
+    ImagePlotContainer, \
+    log_transform, linear_transform
+from yt.data_objects.profiles import \
+    create_profile
+from yt.utilities.exceptions import \
+    YTNotInsideNotebook
+from yt.utilities.logger import ytLogger as mylog
+import _mpl_imports as mpl
+from yt.funcs import \
+    ensure_list, \
+    get_image_suffix, \
+    get_ipython_api_version
+from yt.units.unit_object import Unit
+
+def get_canvas(name):
+    suffix = get_image_suffix(name)
+    
+    if suffix == '':
+        suffix = '.png'
+    if suffix == ".png":
+        canvas_cls = mpl.FigureCanvasAgg
+    elif suffix == ".pdf":
+        canvas_cls = mpl.FigureCanvasPdf
+    elif suffix in (".eps", ".ps"):
+        canvas_cls = mpl.FigureCanvasPS
+    else:
+        mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+        canvas_cls = mpl.FigureCanvasAgg
+    return canvas_cls
+
+def invalidate_plot(f):
+    @wraps(f)
+    def newfunc(*args, **kwargs):
+        rv = f(*args, **kwargs)
+        args[0]._plot_valid = False
+        args[0]._setup_plots()
+        return rv
+    return newfunc
+
+class FigureContainer(dict):
+    def __init__(self):
+        super(FigureContainer, self).__init__()
+
+    def __missing__(self, key):
+        figure = mpl.matplotlib.figure.Figure((10, 8))
+        self[key] = figure
+        return self[key]
+
+class AxesContainer(dict):
+    def __init__(self, fig_container):
+        self.fig_container = fig_container
+        self.ylim = {}
+        super(AxesContainer, self).__init__()
+
+    def __missing__(self, key):
+        figure = self.fig_container[key]
+        self[key] = figure.add_subplot(111)
+        return self[key]
+
+    def __setitem__(self, key, value):
+        super(AxesContainer, self).__setitem__(key, value)
+        self.ylim[key] = (None, None)
+
+def sanitize_label(label, nprofiles):
+    label = ensure_list(label)
+    
+    if len(label) == 1:
+        label = label * nprofiles
+    
+    if len(label) != nprofiles:
+        raise RuntimeError("Number of labels must match number of profiles")
+
+    for l in label:
+        if l is not None and not isinstance(l, basestring):
+            raise RuntimeError("All labels must be None or a string")
+
+    return label
+
+class ParticlePlot(object):
+    r"""
+    Create a particle scatter plot from a data source.
+
+    Given a data object (all_data, region, sphere, etc.), an x field, 
+    and a y field (or fields), this will create a scatter plot with one marker
+    for each particle.
+
+    Parameters
+    ----------
+    data_source : AMR3DData Object
+        The data object to be profiled, such as all_data, region, or 
+        sphere.
+    x_field : str
+        The field to plot on the x-axis.
+    y_field : str
+        The field to plot on the y-axis.
+    plot_spec : dict or list of dicts
+        A dictionary or list of dictionaries containing plot keyword 
+        arguments.  For example, dict(color="blue", linestyle=".").
+        Default: None.
+
+    Examples
+    --------
+
+    This creates profiles of a single dataset.
+
+    >>> import yt
+    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> ad = ds.all_data()
+    >>> plot = ProfilePlot(ad, "density", ["temperature", "velocity_x"],
+    ...                    weight_field="cell_mass",
+    ...                    plot_spec=dict(color='red', linestyle="--"))
+    >>> plot.save()
+
+    This creates profiles from a time series object.
+
+    >>> es = yt.simulation("AMRCosmology.enzo", "Enzo")
+    >>> es.get_time_series()
+
+    >>> profiles = []
+    >>> labels = []
+    >>> plot_specs = []
+    >>> for ds in es[-4:]:
+    ...     ad = ds.all_data()
+    ...     profiles.append(create_profile(ad, ["density"],
+    ...                                    fields=["temperature",
+    ...                                            "velocity_x"]))
+    ...     labels.append(ds.current_redshift)
+    ...     plot_specs.append(dict(linestyle="--", alpha=0.7))
+    >>>
+    >>> plot = ProfilePlot.from_profiles(profiles, labels=labels,
+    ...                                  plot_specs=plot_specs)
+    >>> plot.save()
+
+    Use plot_line_property to change line properties of one or all profiles.
+    
+    """
+    x_log = None
+    y_log = None
+    z_log = None
+    x_title = None
+    y_title = None
+    _plot_valid = False
+
+    def __init__(self, data_source, x_field, y_field,
+                 plot_spec=None):
+
+        if plot_spec is None:
+            plot_spec = {'c':'b', 'marker':'.', 'linestyle':'None', 'markersize':8}
+
+        self.data_source = data_source
+        self.x_field = x_field
+        self.y_field = y_field
+        self.plot_spec = plot_spec
+
+        self.x_data = self.data_source[x_field]
+        self.y_data = self.data_source[y_field]
+        
+        self.figure = mpl.matplotlib.figure.Figure((10, 8))
+        self.axis = self.figure.add_subplot(111)
+        self._setup_plots()
+
+    def save(self, name=None):
+        r"""
+         Saves the scatter plot to disk.
+
+         Parameters
+         ----------
+         name : str
+             The output file keyword.
+
+         """
+        if not self._plot_valid:
+            self._setup_plots()
+        unique = set(self.figures.values())
+        if len(unique) < len(self.figures):
+            iters = izip(xrange(len(unique)), sorted(unique))
+        else:
+            iters = self.figures.iteritems()
+        if name is None:
+            if len(self.profiles) == 1:
+                prefix = self.profiles[0].ds
+            else:
+                prefix = "Multi-data"
+            name = "%s.png" % prefix
+        suffix = get_image_suffix(name)
+        prefix = name[:name.rfind(suffix)]
+        xfn = self.profiles[0].x_field
+        if isinstance(xfn, types.TupleType):
+            xfn = xfn[1]
+        if not suffix:
+            suffix = ".png"
+        canvas_cls = get_canvas(name)
+        fns = []
+        for uid, fig in iters:
+            if isinstance(uid, types.TupleType):
+                uid = uid[1]
+            canvas = canvas_cls(fig)
+            fns.append("%s_1d-Profile_%s_%s%s" % (prefix, xfn, uid, suffix))
+            mylog.info("Saving %s", fns[-1])
+            canvas.print_figure(fns[-1])
+        return fns
+
+    def show(self):
+        r"""This will send any existing plots to the IPython notebook.
+        function name.
+
+        If yt is being run from within an IPython session, and it is able to
+        determine this, this function will send any existing plots to the
+        notebook for display.
+
+        If yt can't determine if it's inside an IPython session, it will raise
+        YTNotInsideNotebook.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = ProfilePlot(ds.all_data(), 'density', 'temperature')
+        >>> pp.show()
+
+        """
+        if "__IPYTHON__" in dir(__builtin__):
+            api_version = get_ipython_api_version()
+            if api_version in ('0.10', '0.11'):
+                self._send_zmq()
+            else:
+                from IPython.display import display
+                display(self)
+        else:
+            raise YTNotInsideNotebook
+
+    def _repr_html_(self):
+        """Return an html representation of the plot object. Will display as a
+        png for each WindowPlotMPL instance in self.plots"""
+        ret = ''
+        canvas = mpl.FigureCanvasAgg(self.figure)
+        f = cStringIO.StringIO()
+        canvas.print_figure(f)
+        f.seek(0)
+        img = base64.b64encode(f.read())
+        ret += '<img src="data:image/png;base64,%s"><br>' % img
+        return ret
+
+    def _setup_plots(self):
+        self.axis.plot(np.array(self.x_data), np.array(self.y_data),
+                       **self.plot_spec)
+
+        xscale = self._get_field_log(self.x_field)
+        yscale = self._get_field_log(self.y_field)
+
+        xtitle = self._get_field_title(self.x_field)
+        ytitle = self._get_field_title(self.y_field)
+
+        self.axis.set_xscale(xscale)
+        self.axis.set_yscale(yscale)
+
+        self.axis.set_xlabel(xtitle)
+        self.axis.set_ylabel(ytitle)
+
+        self._plot_valid = True
+
+    @invalidate_plot
+    def set_line_property(self, property, value, index=None):
+        r"""
+        Set properties for one or all lines to be plotted.
+
+        Parameters
+        ----------
+        property : str
+            The line property to be set.
+        value : str, int, float
+            The value to set for the line property.
+        index : int
+            The index of the profile in the list of profiles to be 
+            changed.  If None, change all plotted lines.
+            Default : None.
+
+        Examples
+        --------
+
+        Change all the lines in a plot
+        plot.set_line_property("linestyle", "-")
+
+        Change a single line.
+        plot.set_line_property("linewidth", 4, index=0)
+        
+        """
+        if index is None:
+            specs = self.plot_spec
+        else:
+            specs = [self.plot_spec[index]]
+        for spec in specs:
+            spec[property] = value
+        return self
+
+    @invalidate_plot
+    def set_log(self, field, log):
+        """set a field to log or linear.
+
+        Parameters
+        ----------
+        field : string
+            the field to set a transform
+        log : boolean
+            Log on/off.
+        """
+        if field == "all":
+            self.x_log = log
+            for field in self.profiles[0].field_data.keys():
+                self.y_log[field] = log
+        else:
+            field, = self.profiles[0].data_source._determine_fields([field])
+            if field == self.profiles[0].x_field:
+                self.x_log = log
+            elif field in self.profiles[0].field_data:
+                self.y_log[field] = log
+            else:
+                raise KeyError("Field %s not in profile plot!" % (field))
+        return self
+
+    @invalidate_plot
+    def set_unit(self, field, unit):
+        """Sets a new unit for the requested field
+
+        Parameters
+        ----------
+        field : string
+           The name of the field that is to be changed.
+
+        new_unit : string or Unit object
+           The name of the new unit.
+        """
+        for profile in self.profiles:
+            if field == profile.x_field[1]:
+                profile.set_x_unit(unit)
+            elif field in self.profiles[0].field_map:
+                profile.set_field_unit(field, unit)
+            else:
+                raise KeyError("Field %s not in profile plot!" % (field))
+        return self
+
+    @invalidate_plot
+    def set_xlim(self, xmin=None, xmax=None):
+        """Sets the limits of the bin field
+
+        Parameters
+        ----------
+        
+        xmin : float or None
+          The new x minimum.  Defaults to None, which leaves the xmin
+          unchanged.
+
+        xmax : float or None
+          The new x maximum.  Defaults to None, which leaves the xmax
+          unchanged.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = yt.ProfilePlot(ds.all_data(), 'density', 'temperature')
+        >>> pp.set_xlim(1e-29, 1e-24)
+        >>> pp.save()
+
+        """
+        for i, p in enumerate(self.profiles):
+            if xmin is None:
+                xmi = p.x_bins.min()
+            else:
+                xmi = xmin
+            if xmax is None:
+                xma = p.x_bins.max()
+            else:
+                xma = xmax
+            extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}
+            units = {p.x_field: str(p.x.units)}
+            for field in p.field_map.values():
+                units[field] = str(p.field_data[field].units)
+            self.profiles[i] = \
+                create_profile(p.data_source, p.x_field,
+                               n_bins=len(p.x_bins)-2,
+                               fields=p.field_map.values(),
+                               weight_field=p.weight_field,
+                               accumulation=p.accumulation,
+                               fractional=p.fractional,
+                               extrema=extrema, units=units)
+        return self
+
+    @invalidate_plot
+    def set_ylim(self, field, ymin=None, ymax=None):
+        """Sets the plot limits for the specified field we are binning.
+
+        Parameters
+        ----------
+
+        field : string or field tuple
+
+        The field that we want to adjust the plot limits for.
+        
+        ymin : float or None
+          The new y minimum.  Defaults to None, which leaves the ymin
+          unchanged.
+
+        ymax : float or None
+          The new y maximum.  Defaults to None, which leaves the ymax
+          unchanged.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = yt.ProfilePlot(ds.all_data(), 'density', ['temperature', 'x-velocity'])
+        >>> pp.set_ylim('temperature', 1e4, 1e6)
+        >>> pp.save()
+
+        """
+        for i, p in enumerate(self.profiles):
+            if field is 'all':
+                fields = self.axes.keys()
+            else:
+                fields = ensure_list(field)
+            for profile in self.profiles:
+                for field in profile.data_source._determine_fields(fields):
+                    if field in profile.field_map:
+                        field = profile.field_map[field]
+                    self.axes.ylim[field] = (ymin, ymax)
+                    # Continue on to the next profile.
+                    break
+        return self
+
+    def _get_field_log(self, field):
+        ds = self.data_source.ds
+        f, = self.data_source._determine_fields([field])
+        fi = ds._get_field_info(*f)
+        do_log = fi.take_log
+        scales = {True: 'log', False: 'linear'}
+        return scales[do_log]
+
+    def _get_field_label(self, field, field_info, field_unit, fractional=False):
+        field_unit = field_unit.latex_representation()
+        field_name = field_info.display_name
+        if isinstance(field, tuple): field = field[1]
+        if field_name is None:
+            field_name = r'$\rm{'+field+r'}$'
+            field_name = r'$\rm{'+field.replace('_','\/').title()+r'}$'
+        elif field_name.find('$') == -1:
+            field_name = field_name.replace(' ','\/')
+            field_name = r'$\rm{'+field_name+r'}$'
+        if fractional:
+            label = field_name + r'$\rm{\/Probability\/Density}$'
+        elif field_unit is None or field_unit == '' or field_unit == '1':
+            label = field_name
+        else:
+            field_unit = field_unit.latex_representation()
+            label = field_name+r'$\/\/('+field_unit+r')$'
+        return label
+
+    def _get_field_title(self, field):
+        ds = self.data_source.ds
+        f, = self.data_source._determine_fields([field])
+        fi = ds._get_field_info(*f)
+        field_unit = Unit(fi.units, registry=self.data_source.ds.unit_registry)
+        title = self._get_field_label(field, fi, field_unit)
+        return title
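
For orientation, a hedged usage sketch of the class added above (the
dataset path is only an example, and save() at this point in the series
still carries ProfilePlot leftovers that later changesets rework):

    import yt
    from yt.visualization.particle_plotter import ParticlePlot

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    plot = ParticlePlot(ds.all_data(),
                        'particle_position_x', 'particle_velocity_x',
                        plot_spec={'c': 'r', 'marker': '.',
                                   'linestyle': 'None', 'markersize': 4})
    plot.save()  # note: the save path is only repaired a few changesets below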


https://bitbucket.org/yt_analysis/yt/commits/21c2d905b5b1/
Changeset:   21c2d905b5b1
Branch:      yt
User:        atmyers
Date:        2014-08-06 06:59:17+00:00
Summary:     making particle plots have adjustable x and y limits
Affected #:  1 file

diff -r a6019f1e68e28a4cdbff1b926c56970ce93fedb3 -r 21c2d905b5b19447e491f0e2984ada82a57ef0bf yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -168,6 +168,8 @@
     z_log = None
     x_title = None
     y_title = None
+    x_lim = (None, None)
+    y_lim = (None, None)
     _plot_valid = False
 
     def __init__(self, data_source, x_field, y_field,
@@ -287,6 +289,9 @@
         self.axis.set_xlabel(xtitle)
         self.axis.set_ylabel(ytitle)
 
+        self.axis.set_xlim(*self.x_lim)
+        self.axis.set_ylim(*self.y_lim)
+
         self._plot_valid = True
 
     @invalidate_plot
@@ -371,7 +376,7 @@
 
     @invalidate_plot
     def set_xlim(self, xmin=None, xmax=None):
-        """Sets the limits of the bin field
+        """Sets the limits of the x field
 
         Parameters
         ----------
@@ -389,45 +394,21 @@
 
         >>> import yt
         >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = yt.ProfilePlot(ds.all_data(), 'density', 'temperature')
-        >>> pp.set_xlim(1e-29, 1e-24)
+        >>> pp = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
+        >>> pp.set_xlim(0.1, 0.9)
         >>> pp.save()
 
         """
-        for i, p in enumerate(self.profiles):
-            if xmin is None:
-                xmi = p.x_bins.min()
-            else:
-                xmi = xmin
-            if xmax is None:
-                xma = p.x_bins.max()
-            else:
-                xma = xmax
-            extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}
-            units = {p.x_field: str(p.x.units)}
-            for field in p.field_map.values():
-                units[field] = str(p.field_data[field].units)
-            self.profiles[i] = \
-                create_profile(p.data_source, p.x_field,
-                               n_bins=len(p.x_bins)-2,
-                               fields=p.field_map.values(),
-                               weight_field=p.weight_field,
-                               accumulation=p.accumulation,
-                               fractional=p.fractional,
-                               extrema=extrema, units=units)
+        self.x_lim = (xmin, xmax)
         return self
 
     @invalidate_plot
-    def set_ylim(self, field, ymin=None, ymax=None):
-        """Sets the plot limits for the specified field we are binning.
+    def set_ylim(self, ymin=None, ymax=None):
+        """Sets the limits for the y-axis of the plot.
 
         Parameters
         ----------
 
-        field : string or field tuple
-
-        The field that we want to adjust the plot limits for.
-        
         ymin : float or None
           The new y minimum.  Defaults to None, which leaves the ymin
           unchanged.
@@ -441,23 +422,12 @@
 
         >>> import yt
         >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = yt.ProfilePlot(ds.all_data(), 'density', ['temperature', 'x-velocity'])
-        >>> pp.set_ylim('temperature', 1e4, 1e6)
+        >>> pp = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
+        >>> pp.set_ylim(1e1, 1e8)
         >>> pp.save()
 
         """
-        for i, p in enumerate(self.profiles):
-            if field is 'all':
-                fields = self.axes.keys()
-            else:
-                fields = ensure_list(field)
-            for profile in self.profiles:
-                for field in profile.data_source._determine_fields(fields):
-                    if field in profile.field_map:
-                        field = profile.field_map[field]
-                    self.axes.ylim[field] = (ymin, ymax)
-                    # Continue on to the next profile.
-                    break
+        self.y_lim = (ymin, ymax)
         return self
 
     def _get_field_log(self, field):
@@ -468,7 +438,7 @@
         scales = {True: 'log', False: 'linear'}
         return scales[do_log]
 
-    def _get_field_label(self, field, field_info, field_unit, fractional=False):
+    def _get_field_label(self, field, field_info, field_unit):
         field_unit = field_unit.latex_representation()
         field_name = field_info.display_name
         if isinstance(field, tuple): field = field[1]
@@ -478,12 +448,9 @@
         elif field_name.find('$') == -1:
             field_name = field_name.replace(' ','\/')
             field_name = r'$\rm{'+field_name+r'}$'
-        if fractional:
-            label = field_name + r'$\rm{\/Probability\/Density}$'
-        elif field_unit is None or field_unit == '' or field_unit == '1':
+        if field_unit is None or field_unit == '' or field_unit == '1':
             label = field_name
         else:
-            field_unit = field_unit.latex_representation()
             label = field_name+r'$\/\/('+field_unit+r')$'
         return label
 


https://bitbucket.org/yt_analysis/yt/commits/04889a538651/
Changeset:   04889a538651
Branch:      yt
User:        atmyers
Date:        2014-08-06 07:08:41+00:00
Summary:     saving particle plots to disk works
Affected #:  1 file

diff -r 21c2d905b5b19447e491f0e2984ada82a57ef0bf -r 04889a538651221e279f8de0deb562e063817e2b yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -202,34 +202,25 @@
          """
         if not self._plot_valid:
             self._setup_plots()
-        unique = set(self.figures.values())
-        if len(unique) < len(self.figures):
-            iters = izip(xrange(len(unique)), sorted(unique))
-        else:
-            iters = self.figures.iteritems()
         if name is None:
-            if len(self.profiles) == 1:
-                prefix = self.profiles[0].ds
-            else:
-                prefix = "Multi-data"
+            prefix = self.data_source.ds
             name = "%s.png" % prefix
         suffix = get_image_suffix(name)
         prefix = name[:name.rfind(suffix)]
-        xfn = self.profiles[0].x_field
+        xfn = self.x_field
         if isinstance(xfn, types.TupleType):
             xfn = xfn[1]
+        yfn = self.y_field
+        if isinstance(yfn, types.TupleType):
+            yfn = yfn[1]
         if not suffix:
             suffix = ".png"
         canvas_cls = get_canvas(name)
-        fns = []
-        for uid, fig in iters:
-            if isinstance(uid, types.TupleType):
-                uid = uid[1]
-            canvas = canvas_cls(fig)
-            fns.append("%s_1d-Profile_%s_%s%s" % (prefix, xfn, uid, suffix))
-            mylog.info("Saving %s", fns[-1])
-            canvas.print_figure(fns[-1])
-        return fns
+        canvas = canvas_cls(self.figure)
+        fn = "%s_ScatterPlot_%s_%s%s" % (prefix, xfn, yfn, suffix)
+        mylog.info("Saving %s", fn)
+        canvas.print_figure(fn)
+        return fn
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.


https://bitbucket.org/yt_analysis/yt/commits/1d499ad27cad/
Changeset:   1d499ad27cad
Branch:      yt
User:        atmyers
Date:        2014-08-06 07:19:44+00:00
Summary:     altering the plot_specs dict works
Affected #:  1 file

diff -r 04889a538651221e279f8de0deb562e063817e2b -r 1d499ad27cade7355013cf08aaf228b712b8abeb yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -265,6 +265,7 @@
         return ret
 
     def _setup_plots(self):
+        self.axis.cla()
         self.axis.plot(np.array(self.x_data), np.array(self.y_data),
                        **self.plot_spec)
 
@@ -286,7 +287,7 @@
         self._plot_valid = True
 
     @invalidate_plot
-    def set_line_property(self, property, value, index=None):
+    def set_line_property(self, property, value):
         r"""
         Set properties for one or all lines to be plotted.
 
@@ -296,10 +297,6 @@
             The line property to be set.
         value : str, int, float
             The value to set for the line property.
-        index : int
-            The index of the profile in the list of profiles to be 
-            changed.  If None, change all plotted lines.
-            Default : None.
 
         Examples
         --------
@@ -311,12 +308,8 @@
         plot.set_line_property("linewidth", 4, index=0)
         
         """
-        if index is None:
-            specs = self.plot_spec
-        else:
-            specs = [self.plot_spec[index]]
-        for spec in specs:
-            spec[property] = value
+        specs = self.plot_spec
+        specs[property] = value
         return self
 
     @invalidate_plot


https://bitbucket.org/yt_analysis/yt/commits/807320e2c3a5/
Changeset:   807320e2c3a5
Branch:      yt
User:        atmyers
Date:        2014-08-06 21:52:58+00:00
Summary:     docstring fix - the method is called set_line_property, not plot_line_property.
Affected #:  2 files

diff -r 1d499ad27cade7355013cf08aaf228b712b8abeb -r 807320e2c3a54bec927c789c615f492c8900b269 yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -140,27 +140,7 @@
     ...                    plot_spec=dict(color='red', linestyle="--"))
     >>> plot.save()
 
-    This creates profiles from a time series object.
-
-    >>> es = yt.simulation("AMRCosmology.enzo", "Enzo")
-    >>> es.get_time_series()
-
-    >>> profiles = []
-    >>> labels = []
-    >>> plot_specs = []
-    >>> for ds in es[-4:]:
-    ...     ad = ds.all_data()
-    ...     profiles.append(create_profile(ad, ["density"],
-    ...                                    fields=["temperature",
-    ...                                            "velocity_x"]))
-    ...     labels.append(ds.current_redshift)
-    ...     plot_specs.append(dict(linestyle="--", alpha=0.7))
-    >>>
-    >>> plot = ProfilePlot.from_profiles(profiles, labels=labels,
-    ...                                  plot_specs=plot_specs)
-    >>> plot.save()
-
-    Use plot_line_property to change line properties of one or all profiles.
+    Use set_line_property to change line properties of one or all profiles.
     
     """
     x_log = None

diff -r 1d499ad27cade7355013cf08aaf228b712b8abeb -r 807320e2c3a54bec927c789c615f492c8900b269 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -184,7 +184,7 @@
     ...                                  plot_specs=plot_specs)
     >>> plot.save()
 
-    Use plot_line_property to change line properties of one or all profiles.
+    Use set_line_property to change line properties of one or all profiles.
     
     """
     x_log = None


https://bitbucket.org/yt_analysis/yt/commits/1c607ab18031/
Changeset:   1c607ab18031
Branch:      yt
User:        atmyers
Date:        2014-08-06 21:56:59+00:00
Summary:     add correct docstring for the ParticlePlot
Affected #:  1 file

diff -r 807320e2c3a54bec927c789c615f492c8900b269 -r 1c607ab180310918881789194f7b84162b02f364 yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -124,20 +124,16 @@
         The field to plot on the y-axis.
     plot_spec : dict or list of dicts
         A dictionary or list of dictionaries containing plot keyword 
-        arguments.  For example, dict(color="blue", linestyle=".").
-        Default: None.
+        arguments.  For example, dict(c='r', marker='.').
+        Default: dict(c='b', marker='.', linestyle='None', markersize=8)
 
     Examples
     --------
 
-    This creates profiles of a single dataset.
-
     >>> import yt
-    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
     >>> ad = ds.all_data()
-    >>> plot = ProfilePlot(ad, "density", ["temperature", "velocity_x"],
-    ...                    weight_field="cell_mass",
-    ...                    plot_spec=dict(color='red', linestyle="--"))
+    >>> plot = yt.ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x')
     >>> plot.save()
 
     Use set_line_property to change line properties of one or all profiles.


https://bitbucket.org/yt_analysis/yt/commits/efaddff6c755/
Changeset:   efaddff6c755
Branch:      yt
User:        atmyers
Date:        2014-08-06 22:17:29+00:00
Summary:     toggling log / linear scales
Affected #:  1 file

diff -r 1c607ab180310918881789194f7b84162b02f364 -r efaddff6c755e5ddf9a4faf7572625ae3e438acd yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -245,8 +245,7 @@
         self.axis.plot(np.array(self.x_data), np.array(self.y_data),
                        **self.plot_spec)
 
-        xscale = self._get_field_log(self.x_field)
-        yscale = self._get_field_log(self.y_field)
+        xscale, yscale = self._get_axis_log()
 
         xtitle = self._get_field_title(self.x_field)
         ytitle = self._get_field_title(self.y_field)
@@ -289,8 +288,8 @@
         return self
 
     @invalidate_plot
-    def set_log(self, field, log):
-        """set a field to log or linear.
+    def set_xlog(self, log):
+        """set the x-axis to log or linear.
 
         Parameters
         ----------
@@ -299,21 +298,25 @@
         log : boolean
             Log on/off.
         """
-        if field == "all":
-            self.x_log = log
-            for field in self.profiles[0].field_data.keys():
-                self.y_log[field] = log
-        else:
-            field, = self.profiles[0].data_source._determine_fields([field])
-            if field == self.profiles[0].x_field:
-                self.x_log = log
-            elif field in self.profiles[0].field_data:
-                self.y_log[field] = log
-            else:
-                raise KeyError("Field %s not in profile plot!" % (field))
+        self.x_log = log
         return self
 
     @invalidate_plot
+    def set_ylog(self, log):
+        """set the y-axis to log or linear.
+
+        Parameters
+        ----------
+        log : boolean
+            Log on/off.
+        """
+        self.y_log = log
+        return self
+    
+
+    @invalidate_plot
     def set_unit(self, field, unit):
         """Sets a new unit for the requested field
 
@@ -325,13 +328,12 @@
         new_unit : string or Unit object
            The name of the new unit.
         """
-        for profile in self.profiles:
-            if field == profile.x_field[1]:
-                profile.set_x_unit(unit)
-            elif field in self.profiles[0].field_map:
-                profile.set_field_unit(field, unit)
-            else:
-                raise KeyError("Field %s not in profile plot!" % (field))
+        if field == self.x_field:
+            profile.set_x_unit(unit)
+        elif field == self.y_field:
+            profile.set_field_unit(field, unit)
+        else:
+            raise KeyError("Field %s not in profile plot!" % (field))
         return self
 
     @invalidate_plot
@@ -390,13 +392,24 @@
         self.y_lim = (ymin, ymax)
         return self
 
-    def _get_field_log(self, field):
-        ds = self.data_source.ds
-        f, = self.data_source._determine_fields([field])
-        fi = ds._get_field_info(*f)
-        do_log = fi.take_log
+    def _get_axis_log(self):
+
+        xf, = self.data_source._determine_fields([self.x_field])
+        xfi = self.data_source.ds._get_field_info(*xf)
+        if self.x_log is None:
+            x_log = xfi.take_log
+        else:
+            x_log = self.x_log
+
+        yf, = self.data_source._determine_fields([self.y_field])
+        yfi = self.data_source.ds._get_field_info(*yf)
+        if self.y_log is None:
+            y_log = yfi.take_log
+        else:
+            y_log = self.y_log
+        
         scales = {True: 'log', False: 'linear'}
-        return scales[do_log]
+        return scales[x_log], scales[y_log]
 
     def _get_field_label(self, field, field_info, field_unit):
         field_unit = field_unit.latex_representation()


https://bitbucket.org/yt_analysis/yt/commits/a2c8ad23487c/
Changeset:   a2c8ad23487c
Branch:      yt
User:        atmyers
Date:        2014-08-06 22:21:31+00:00
Summary:     minor typo in docstrings
Affected #:  1 file

diff -r efaddff6c755e5ddf9a4faf7572625ae3e438acd -r a2c8ad23487c0ae4efedf67eee8cc21185afd642 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -120,7 +120,7 @@
         return defaultdict.__init__(self, default_factory)
 
 class ImagePlotContainer(object):
-    """A countainer for plots with colorbars.
+    """A container for plots with colorbars.
 
     """
     _plot_type = None


https://bitbucket.org/yt_analysis/yt/commits/217bb9ebdc77/
Changeset:   217bb9ebdc77
Branch:      yt
User:        atmyers
Date:        2014-08-06 22:39:59+00:00
Summary:     importing the Particle Plotter into the default name space
Affected #:  2 files

diff -r a2c8ad23487c0ae4efedf67eee8cc21185afd642 -r 217bb9ebdc77f9c3cc670deb2548c65abf05874b yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -143,7 +143,7 @@
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, ProfilePlot, PhasePlot
+    show_colormaps, ProfilePlot, PhasePlot, ParticlePlot
 
 from yt.visualization.volume_rendering.api import \
     off_axis_projection, ColorTransferFunction, \

diff -r a2c8ad23487c0ae4efedf67eee8cc21185afd642 -r 217bb9ebdc77f9c3cc670deb2548c65abf05874b yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -49,8 +49,11 @@
     OffAxisProjectionPlot
 
 from .profile_plotter import \
-     ProfilePlot, \
-     PhasePlot
+    ProfilePlot, \
+    PhasePlot
+
+from .particle_plotter import \
+    ParticlePlot
     
 from .base_plot_types import \
     get_multi_plot


https://bitbucket.org/yt_analysis/yt/commits/43038eeb8a8d/
Changeset:   43038eeb8a8d
Branch:      yt
User:        atmyers
Date:        2014-08-06 23:07:57+00:00
Summary:     setting units now works for particle plots as well - the last modification function I needed.
Affected #:  1 file

diff -r 217bb9ebdc77f9c3cc670deb2548c65abf05874b -r 43038eeb8a8d7b4bc744e675e8bb6e8fd0e43f0c yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -246,9 +246,7 @@
                        **self.plot_spec)
 
         xscale, yscale = self._get_axis_log()
-
-        xtitle = self._get_field_title(self.x_field)
-        ytitle = self._get_field_title(self.y_field)
+        xtitle, ytitle = self._get_axis_titles()
 
         self.axis.set_xscale(xscale)
         self.axis.set_yscale(yscale)
@@ -329,9 +327,9 @@
            The name of the new unit.
         """
         if field == self.x_field:
-            profile.set_x_unit(unit)
+            self.x_data.convert_to_units(unit)
         elif field == self.y_field:
-            profile.set_field_unit(field, unit)
+            self.y_data.convert_to_units(unit)
         else:
             raise KeyError("Field %s not in profile plot!" % (field))
         return self
@@ -427,10 +425,14 @@
             label = field_name+r'$\/\/('+field_unit+r')$'
         return label
 
-    def _get_field_title(self, field):
-        ds = self.data_source.ds
-        f, = self.data_source._determine_fields([field])
-        fi = ds._get_field_info(*f)
-        field_unit = Unit(fi.units, registry=self.data_source.ds.unit_registry)
-        title = self._get_field_label(field, fi, field_unit)
-        return title
+    def _get_axis_titles(self):
+
+        xfi = self.data_source.ds._get_field_info(self.x_field)
+        x_unit = Unit(self.x_data.units, registry=self.data_source.ds.unit_registry)
+        x_title = self._get_field_label(self.x_field, xfi, x_unit)
+
+        yfi = self.data_source.ds._get_field_info(self.y_field)
+        y_unit = Unit(self.y_data.units, registry=self.data_source.ds.unit_registry)
+        y_title = self._get_field_label(self.y_field, yfi, y_unit)
+
+        return (x_title, y_title)
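
After this change set_unit converts the cached arrays in place, so the
axis labels pick up the new unit on the next redraw. Continuing the usage
sketch (assuming the velocity field carries convertible code units):

    plot.set_unit('particle_velocity_x', 'km/s')
    plot.save()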


https://bitbucket.org/yt_analysis/yt/commits/2ad38426f294/
Changeset:   2ad38426f294
Branch:      yt
User:        atmyers
Date:        2014-08-07 19:44:41+00:00
Summary:     as of yt-3.0, Chombo particles are supported
Affected #:  1 file

diff -r 43038eeb8a8d7b4bc744e675e8bb6e8fd0e43f0c -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -24,7 +24,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Chombo                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
+| Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Enzo                  |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 


https://bitbucket.org/yt_analysis/yt/commits/31e7046ca0ef/
Changeset:   31e7046ca0ef
Branch:      yt
User:        atmyers
Date:        2014-08-15 00:13:32+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  25 files

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -16,7 +16,7 @@
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
+BRANCH="yt" # This is the branch to which we will forcibly update.
 
 if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d705a81671d5692ed6691b3402115edbe9c98af815af5bb160ddf551bf02c76"
+  "signature": "sha256:427da1e1d02deb543246218dc8cce991268b518b25cfdd5944a4a436695f874b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -40,11 +40,13 @@
      "source": [
       "We will filter these into young stars and old stars by masking on the ('Stars', 'creation_time') field. \n",
       "\n",
-      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The second argument is a yt data container and is usually the only one used in a filter definition.\n",
+      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The first argument is a `ParticleFilter` object that contains metadata about the filter its self.  The second argument is a yt data container.\n",
       "\n",
-      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages.\n",
+      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages. \n",
       "\n",
-      "Old stars either formed dynamically in the simulation (ages greater than 5 Myr) or were present in the initial conditions (negative ages)."
+      "Conversely, let's define \"old\" stars as those stars formed dynamically in the simulation with ages greater than 5 Myr.  We also include stars with negative ages, since these stars were included in the simulation initial conditions.\n",
+      "\n",
+      "We make use of `pfilter.filtered_type` so that the filter definition will use the same particle type as the one specified in the call to `add_particle_filter` below.  This makes the filter definition usable for arbitrary particle types.  Since we're only filtering the `\"Stars\"` particle type in this example, we could have also replaced `pfilter.filtered_type` with `\"Stars\"` and gotten the same result."
      ]
     },
     {
@@ -52,12 +54,12 @@
      "collapsed": false,
      "input": [
       "def young_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_and(age.in_units('Myr') <= 5, age >= 0)\n",
       "    return filter\n",
       "\n",
       "def old_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_or(age.in_units('Myr') >= 5, age < 0)\n",
       "    return filter"
      ],
@@ -140,4 +142,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file
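
For reference, the ``pfilter.filtered_type`` pattern above only pays off once the
filter is registered and enabled on a dataset.  A minimal, self-contained sketch
of that registration (the dataset path is hypothetical; the ``add_particle_filter``
call is the one the notebook text refers to below the shown cells):

.. code-block:: python

   import numpy as np
   import yt

   def young_stars(pfilter, data):
       # Age of each particle relative to the current simulation time.
       age = data.ds.current_time - data[pfilter.filtered_type, "creation_time"]
       # Keep stars younger than 5 Myr; drop the negative-age particles
       # Tipsy uses for stars present in the initial conditions.
       return np.logical_and(age.in_units("Myr") <= 5, age >= 0)

   # Register the filter globally, then enable it on the dataset.
   yt.add_particle_filter("young_stars", function=young_stars,
                          filtered_type="Stars", requires=["creation_time"])

   ds = yt.load("TipsyGalaxy/galaxy.00300")  # hypothetical dataset path
   ds.add_particle_filter("young_stars")
   ad = ds.all_data()
   print(ad["young_stars", "particle_mass"].sum())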

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -179,6 +179,38 @@
      fields or that get aliased to themselves, we can specify a different
      desired output unit than the unit found on disk.
 
+Debugging a Derived Field
+-------------------------
+
+If your derived field is not behaving as you would like, you can insert a call
+to ``data._debug()`` to spawn an interactive interpreter whenever that line is
+reached.  Note that this is slightly different from calling
+``pdb.set_trace()``, as it will *only* trigger when the derived field is being
+called on an actual data object, rather than during the field detection phase.
+The starting position will be one function lower in the stack than the one you
+are likely interested in, but you can either step back to the derived field
+function or simply type ``u`` to go up a level in the stack.
+
+For instance, if you had defined this derived field:
+
+.. code-block:: python
+
+   @yt.derived_field(name = "funthings")
+   def funthings(field, data):
+       return data["sillythings"] + data["humorousthings"]**2.0
+
+And you wanted to debug it, you could do:
+
+.. code-block:: python
+
+   @yt.derived_field(name = "funthings")
+   def funthings(field, data):
+       data._debug()
+       return data["sillythings"] + data["humorousthings"]**2.0
+
+And now, when that derived field is actually used, you will be placed into a
+debugger.
+
 Units for Cosmological Datasets
 -------------------------------
 

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -20,7 +20,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | ARTIO                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     N      |   Full   |
+| Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1,8 +1,10 @@
-from scipy import optimize
-import numpy as na
+import numpy as np
 import h5py
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
+from yt.utilities.on_demand_imports import _scipy
+
+optimize = _scipy.optimize
 
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
         minError=1E-4, complexLim=.995,
@@ -83,7 +85,7 @@
     x0,xRes=x[0],x[1]-x[0]
 
     #Empty fit without any lines
-    yFit = na.ones(len(fluxData))
+    yFit = np.ones(len(fluxData))
 
     #Force the first and last flux pixel to be 1 to prevent OOB
     fluxData[0]=1
@@ -98,10 +100,10 @@
     #Fit all species one at a time in given order from low to high wavelength
     for species in orderFits:
         speciesDict = speciesDicts[species]
-        speciesLines = {'N':na.array([]),
-                        'b':na.array([]),
-                        'z':na.array([]),
-                        'group#':na.array([])}
+        speciesLines = {'N':np.array([]),
+                        'b':np.array([]),
+                        'z':np.array([]),
+                        'group#':np.array([])}
 
         #Set up wavelengths for species
         initWl = speciesDict['wavelength'][0]
@@ -131,7 +133,7 @@
                         yFitBounded,z,speciesDict,
                         minSize,minError)
 
-            if na.size(newLinesP)> 0:
+            if np.size(newLinesP)> 0:
 
                 #Check for EXPLOOOOSIIONNNSSS
                 newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
@@ -150,12 +152,12 @@
 
 
             #Add new group to all fitted lines
-            if na.size(newLinesP)>0:
-                speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
-                speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
-                speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
-                groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
-                speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+            if np.size(newLinesP)>0:
+                speciesLines['N']=np.append(speciesLines['N'],newLinesP[:,0])
+                speciesLines['b']=np.append(speciesLines['b'],newLinesP[:,1])
+                speciesLines['z']=np.append(speciesLines['z'],newLinesP[:,2])
+                groupNums = b_i*np.ones(np.size(newLinesP[:,0]))
+                speciesLines['group#']=np.append(speciesLines['group#'],groupNums)
 
         allSpeciesLines[species]=speciesLines
 
@@ -226,7 +228,7 @@
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
         initP[2]=initz
-        initP=na.array([initP])
+        initP=np.array([initP])
 
     linesP = initP
 
@@ -259,7 +261,7 @@
 
 
         #Set results of optimization
-        linesP = na.reshape(fitP,(-1,3))
+        linesP = np.reshape(fitP,(-1,3))
 
         #Generate difference between current best fit and data
         yNewFit=_gen_flux_lines(x,linesP,speciesDict)
@@ -288,7 +290,7 @@
                 break
 
         #If too many lines 
-        if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+        if np.shape(linesP)[0]>8 or np.size(linesP)+3>=len(x):
             #If it's fittable by flag tools and still bad, use flag tools
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
@@ -315,17 +317,17 @@
             newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
-        linesP=na.append(linesP,[newP],axis=0)
+        linesP=np.append(linesP,[newP],axis=0)
 
 
     #Check the parameters of all lines to see if they fall in an
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict,x)
+        check=_check_params(np.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
-    linesP = na.delete(linesP,remove,axis=0)
+    linesP = np.delete(linesP,remove,axis=0)
 
     return linesP,flag
 
@@ -377,7 +379,7 @@
     #Iterate through test line guesses
     for initLines in lineTests:
         if initLines[1,0]==0:
-            initLines = na.delete(initLines,1,axis=0)
+            initLines = np.delete(initLines,1,axis=0)
 
         #Do fitting with initLines as first guess
         linesP,flag=_complex_fit(x,yDat,yFit,initz,
@@ -421,7 +423,7 @@
     """
 
     #Set up a bunch of empty lines
-    testP = na.zeros((10,2,3))
+    testP = np.zeros((10,2,3))
 
     testP[0,0,:]=[1E18,20,initz]
     testP[1,0,:]=[1E18,40,initz]
@@ -542,7 +544,7 @@
                 errBound = 10*errBound*len(yb)
 
             #Generate a fit and find the difference to data
-            yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+            yFitb=_gen_flux_lines(xb,np.array([p]),speciesDict)
             dif =yb-yFitb
 
 
@@ -557,7 +559,7 @@
                 break
 
     #Remove all bad line fits
-    linesP = na.delete(linesP,removeLines,axis=0)
+    linesP = np.delete(linesP,removeLines,axis=0)
 
     return linesP 
 
@@ -755,7 +757,7 @@
             if firstLine: 
                 break
 
-    flux = na.exp(-y)
+    flux = np.exp(-y)
     return flux
 
 def _gen_tau(t, p, f, Gamma, lambda_unshifted):
@@ -768,7 +770,7 @@
     a=7.95774715459E-15*Gamma*lambda_unshifted/b
     x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
     
-    H = na.zeros(len(x))
+    H = np.zeros(len(x))
     H = voigt(a,x)
     
     tau = tau_o*H
@@ -910,9 +912,9 @@
 
             # Make the final line parameters. It's annoying because
             # one or both regions may have fit to nothing
-            if na.size(p1)> 0 and na.size(p2)>0:
-                p = na.r_[p1,p2]
-            elif na.size(p1) > 0:
+            if np.size(p1)> 0 and np.size(p2)>0:
+                p = np.r_[p1,p2]
+            elif np.size(p1) > 0:
                 p = p1
             else:
                 p = p2
@@ -952,7 +954,7 @@
             # max and min to prevent boundary errors
 
             flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
-            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+            flux = np.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
 
             #Find regions that are absorbing outside the region we fit
             flux_dif = 1 - flux
@@ -971,7 +973,7 @@
                 remove_lines.append(i)
     
     if remove_lines:
-        p = na.delete(p, remove_lines, axis=0)
+        p = np.delete(p, remove_lines, axis=0)
 
     return p
 

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -225,13 +225,13 @@
 
     # accumulate, if necessary
     if accumulation:
-        used = my_profile.used        
+        used = my_profile.used
         for field in my_profile.field_data:
             if weight_field is None:
                 my_profile.field_data[field][used] = \
                     np.cumsum(my_profile.field_data[field][used])
             else:
-                my_weight = my_profile.weight[:, 0]
+                my_weight = my_profile.weight
                 my_profile.field_data[field][used] = \
                   np.cumsum(my_profile.field_data[field][used] * my_weight[used]) / \
                   np.cumsum(my_weight[used])

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -27,7 +27,7 @@
 from yt.extern.six import add_metaclass
 
 from yt.config import ytcfg
-from yt.funcs import mylog
+from yt.funcs import mylog, ensure_dir_exists
 from yt.utilities.performance_counters import \
     time_function, \
     yt_counters

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -233,7 +233,6 @@
             fi += 1
         pi += npart
     num_p[0] = local_parts
-    del ds._instantiated_hierarchy
     del ds
 
 cdef class RockstarInterface:

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -22,14 +22,10 @@
 except ImportError:
     pass
 
-try:
-    import xspec
-    from scipy.integrate import cumtrapz
-    from scipy import stats        
-except ImportError:
-    pass
-from yt.utilities.on_demand_imports import _astropy
+from yt.utilities.on_demand_imports import _astropy, _scipy
+
 pyfits = _astropy.pyfits
+stats = _scipy.stats
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
@@ -212,11 +208,14 @@
         try:
             self.line_handle = pyfits.open(self.linefile)
         except IOError:
-            mylog.error("LINE file %s does not exist" % (self.linefile))
+            mylog.error("LINE file %s does not exist" % self.linefile)
+            raise IOError("LINE file %s does not exist" % self.linefile)
         try:
             self.coco_handle = pyfits.open(self.cocofile)
         except IOError:
-            mylog.error("COCO file %s does not exist" % (self.cocofile))
+            mylog.error("COCO file %s does not exist" % self.cocofile)
+            raise IOError("COCO file %s does not exist" % self.cocofile)
+
         self.Tvals = self.line_handle[1].data.field("kT")
         self.dTvals = np.diff(self.Tvals)
         self.minlam = self.wvbins.min()
@@ -224,18 +223,18 @@
     
     def _make_spectrum(self, element, tindex):
         
-        tmpspec = np.zeros((self.nchan))
+        tmpspec = np.zeros(self.nchan)
         
         i = np.where((self.line_handle[tindex].data.field('element') == element) &
                      (self.line_handle[tindex].data.field('lambda') > self.minlam) &
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
-        vec = np.zeros((self.nchan))
+        vec = np.zeros(self.nchan)
         E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.ndarray_view()
         if self.thermal_broad:
-            vec = np.zeros((self.nchan))
+            vec = np.zeros(self.nchan)
             sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value
             for E, sig, a in zip(E0, sigma, amp):
                 cdf = stats.norm(E,sig).cdf(ebins)
@@ -270,10 +269,10 @@
         """
         Get the thermal emission spectrum given a temperature *kT* in keV. 
         """
-        cspec_l = np.zeros((self.nchan))
-        mspec_l = np.zeros((self.nchan))
-        cspec_r = np.zeros((self.nchan))
-        mspec_r = np.zeros((self.nchan))
+        cspec_l = np.zeros(self.nchan)
+        mspec_l = np.zeros(self.nchan)
+        cspec_r = np.zeros(self.nchan)
+        mspec_r = np.zeros(self.nchan)
         tindex = np.searchsorted(self.Tvals, kT)-1
         if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
             return cspec_l*cm3/units.s, mspec_l*cm3/units.s

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -336,8 +336,8 @@
                                   registry=self.ds.unit_registry)
             if self.weight_field is None and not self._sum_only:
                 u_obj = Unit(units, registry=self.ds.unit_registry)
-                if (u_obj.is_code_unit and not u_obj.is_dimensionless) and \
-                  input_units != units or self.ds.no_cgs_equiv_length:
+                if ((u_obj.is_code_unit or self.ds.no_cgs_equiv_length) and
+                    not u_obj.is_dimensionless) and input_units != units:
                     final_unit = "(%s) * code_length" % units
                     self[field].convert_to_units(final_unit)
         for i in data.keys(): self[i] = data.pop(i)

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -129,6 +129,15 @@
         self._index = self.ds.index
         return self._index
 
+    def _debug(self):
+        """
+        When called from within a derived field, this will run pdb.  However,
+        during field detection, it will not.  This allows you to more easily
+        debug fields that are being called on actual objects.
+        """
+        import pdb
+        pdb.set_trace()
+
     def _set_default_field_parameters(self):
         self.field_parameters = {}
         for k,v in self._default_field_parameters.items():

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -148,6 +148,10 @@
             self[item] = self._read_data(item)
         return self[item]
 
+    def _debug(self):
+        # We allow this to pass through.
+        return
+
     def deposit(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))
 

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -107,7 +107,7 @@
         self.directory = os.path.dirname(self.dataset.filename)
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
-        self.index_filename = self.dataset.filename
+        self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
         #self.directory = os.path.dirname(self.index_filename)
         self._fhandle = file(self.index_filename,'rb')
         GridIndex.__init__(self, ds, dataset_type)
@@ -366,7 +366,7 @@
         # Unfortunately we now have to mandate that the index gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.
-        self.h
+        self.index
 
     def _set_code_unit_attributes(self):
         """
@@ -458,14 +458,13 @@
             self.hubble_constant = self.cosmological_simulation = 0.0
         self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
-        if self.specified_parameters.has_key("gamma") :
+        if self.specified_parameters.has_key("gamma"):
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
-        else :
+        else:
             self.parameters["Gamma"] = 5./3. 
         self.geometry = self.specified_parameters.get("geometry", "cartesian")
         self._handle.close()
 
-
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -25,6 +25,11 @@
 erg_units = "code_mass * (code_length/code_time)**2"
 rho_units = "code_mass / code_length**3"
 
+def velocity_field(comp):
+    def _velocity(field, data):
+        return data["athena", "momentum_%s" % comp]/data["athena","density"]
+    return _velocity
+
 class AthenaFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", ("code_mass/code_length**3", ["density"], None)),
@@ -41,19 +46,17 @@
     def setup_fluid_fields(self):
         # Add velocity fields
         for comp in "xyz":
-            vel_field = ("athena", "velocity_%s" % (comp))
-            mom_field = ("athena", "momentum_%s" % (comp))
+            vel_field = ("athena", "velocity_%s" % comp)
+            mom_field = ("athena", "momentum_%s" % comp)
             if vel_field in self.field_list:
                 self.add_output_field(vel_field, units="code_length/code_time")
-                self.alias(("gas","velocity_%s" % (comp)), vel_field,
+                self.alias(("gas","velocity_%s" % comp), vel_field,
                            units="cm/s")
             elif mom_field in self.field_list:
                 self.add_output_field(mom_field,
-                                      units="code_mass*code_length/code_time")
-                f = lambda data: data["athena","momentum_%s" % (comp)] / \
-                                 data["athena","density"]
-                self.add_field(("gas","velocity_%s" % (comp)),
-                               function=f, units = "cm/s")
+                                      units="code_mass/code_time/code_length**2")
+                self.add_field(("gas","velocity_%s" % comp),
+                               function=velocity_field(comp), units = "cm/s")
         # Add pressure, energy, and temperature fields
         def ekin1(data):
             return 0.5*(data["athena","momentum_x"]**2 +
@@ -96,6 +99,8 @@
                            function=_total_energy,
                            units="erg/g")
         elif ("athena","total_energy") in self.field_list:
+            self.add_output_field(("athena","total_energy"),
+                                  units=pres_units)
             def _pressure(field, data):
                 return eint_from_etot(data)*(data.ds.gamma-1.0)
             self.add_field(("gas","pressure"), function=_pressure,

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/frontends/athena/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -0,0 +1,59 @@
+"""
+Athena frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.athena.api import AthenaDataset
+
+_fields_cloud = ("scalar[0]", "density", "total_energy")
+
+cloud = "ShockCloud/id0/Cloud.0050.vtk"
+@requires_ds(cloud)
+def test_cloud():
+    ds = data_dir_load(cloud)
+    yield assert_equal, str(ds), "Cloud.0050"
+    for test in small_patch_amr(cloud, _fields_cloud):
+        test_cloud.__name__ = test.description
+        yield test
+
+_fields_blast = ("temperature", "density", "velocity_magnitude")
+
+blast = "MHDBlast/id0/Blast.0100.vtk"
+@requires_ds(blast)
+def test_blast():
+    ds = data_dir_load(blast)
+    yield assert_equal, str(ds), "Blast.0100"
+    for test in small_patch_amr(blast, _fields_blast):
+        test_blast.__name__ = test.description
+        yield test
+
+parameters_stripping = {"time_unit":3.086e14,
+                        "length_unit":8.0236e22,
+                        "mass_unit":9.999e-30*8.0236e22**3}
+
+_fields_stripping = ("temperature", "density", "specific_scalar[0]")
+
+stripping = "RamPressureStripping/id0/rps.0062.vtk"
+@requires_ds(stripping, big_data=True)
+def test_stripping():
+    ds = data_dir_load(stripping, kwargs={"parameters":parameters_stripping})
+    yield assert_equal, str(ds), "rps.0062"
+    for test in small_patch_amr(stripping, _fields_stripping):
+        test_stripping.__name__ = test.description
+        yield test

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/frontends/halo_catalogs/rockstar/definitions.py
--- a/yt/frontends/halo_catalogs/rockstar/definitions.py
+++ b/yt/frontends/halo_catalogs/rockstar/definitions.py
@@ -35,17 +35,25 @@
     ("unused", BINARY_HEADER_SIZE - 4*12 - 4 - 8*6 - 12, "c")
 )
 
-halo_dt = np.dtype([
+# Note the optional final element in the tuples below, which gives the
+# (min, max) format revision range in which the field appears.
+
+KNOWN_REVISIONS=[0, 1]
+
+halo_dt = [
     ('particle_identifier', np.int64),
     ('particle_position_x', np.float32),
     ('particle_position_y', np.float32),
     ('particle_position_z', np.float32),
+    ('particle_mposition_x', np.float32, (0, 0)),
+    ('particle_mposition_y', np.float32, (0, 0)),
+    ('particle_mposition_z', np.float32, (0, 0)),
     ('particle_velocity_x', np.float32),
     ('particle_velocity_y', np.float32),
     ('particle_velocity_z', np.float32),
-    ('particle_corevel_x', np.float32),
-    ('particle_corevel_y', np.float32),
-    ('particle_corevel_z', np.float32),
+    ('particle_corevel_x', np.float32, (1, 100)),
+    ('particle_corevel_y', np.float32, (1, 100)),
+    ('particle_corevel_z', np.float32, (1, 100)),
     ('particle_bulkvel_x', np.float32),
     ('particle_bulkvel_y', np.float32),
     ('particle_bulkvel_z', np.float32),
@@ -75,15 +83,15 @@
     ('Ax', np.float32),
     ('Ay', np.float32),
     ('Az', np.float32),
-    ('b_to_a2', np.float32),
-    ('c_to_a2', np.float32),
-    ('A2x', np.float32),
-    ('A2y', np.float32),
-    ('A2z', np.float32),
+    ('b_to_a2', np.float32, (1, 100)),
+    ('c_to_a2', np.float32, (1, 100)),
+    ('A2x', np.float32, (1, 100)),
+    ('A2y', np.float32, (1, 100)),
+    ('A2z', np.float32, (1, 100)),
     ('bullock_spin', np.float32),
     ('kin_to_pot', np.float32),
-    ('m_pe_b', np.float32),
-    ('m_pe_d', np.float32),
+    ('m_pe_b', np.float32, (1, 100)),
+    ('m_pe_d', np.float32, (1, 100)),
     ('num_p', np.int64),
     ('num_child_particles', np.int64),
     ('p_start', np.int64),
@@ -93,7 +101,20 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
-], align=True)
+]
+
+halo_dts = {}
+
+for rev in KNOWN_REVISIONS:
+    halo_dts[rev] = []
+    for item in halo_dt:
+        if len(item) == 2:
+            halo_dts[rev].append(item)
+        else:
+            mi, ma = item[2]
+            if (mi <= rev) and (rev <= ma):
+                halo_dts[rev].append(item[:2])
+    halo_dts[rev] = np.dtype(halo_dts[rev], align=True)
 
 particle_dt = np.dtype([
     ('particle_identifier', np.int64),
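
The scheme above is compact: two-element tuples are unconditional, while an
optional third ``(min_rev, max_rev)`` element gates a field to a range of
Rockstar format revisions, and one ``np.dtype`` is built per known revision.
A minimal standalone sketch of the same filtering logic:

.. code-block:: python

   import numpy as np

   fields = [
       ("particle_identifier", np.int64),             # all revisions
       ("particle_mposition_x", np.float32, (0, 0)),  # revision 0 only
       ("particle_corevel_x", np.float32, (1, 100)),  # revision 1 onward
   ]

   def dtype_for_revision(rev):
       kept = []
       for item in fields:
           if len(item) == 2:
               kept.append(item)          # unconditional field
           else:
               mi, ma = item[2]
               if mi <= rev <= ma:
                   kept.append(item[:2])  # keep it, stripping the gate
       return np.dtype(kept, align=True)

   print(dtype_for_revision(0).names)  # includes particle_mposition_x
   print(dtype_for_revision(1).names)  # includes particle_corevel_x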

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -24,7 +24,7 @@
     BaseIOHandler
 
 import yt.utilities.fortran_utils as fpu
-from .definitions import halo_dt
+from .definitions import halo_dts
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
@@ -32,6 +32,10 @@
 class IOHandlerRockstarBinary(BaseIOHandler):
     _dataset_type = "rockstar_binary"
 
+    def __init__(self, *args, **kwargs):
+        super(IOHandlerRockstarBinary, self).__init__(*args, **kwargs)
+        self._halo_dt = halo_dts[self.ds.parameters['format_revision']]
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
@@ -45,11 +49,12 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
+        
         for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
-                halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+                halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
                 x = halos['particle_position_x'].astype("float64")
                 y = halos['particle_position_y'].astype("float64")
                 z = halos['particle_position_z'].astype("float64")
@@ -70,7 +75,7 @@
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     f.seek(data_file._position_offset, os.SEEK_SET)
-                    halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+                    halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
                     x = halos['particle_position_x'].astype("float64")
                     y = halos['particle_position_y'].astype("float64")
                     z = halos['particle_position_z'].astype("float64")
@@ -89,7 +94,7 @@
         ind = 0
         with open(data_file.filename, "rb") as f:
             f.seek(data_file._position_offset, os.SEEK_SET)
-            halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+            halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
             pos = np.empty((halos.size, 3), dtype="float64")
             # These positions are in Mpc, *not* "code" units
             pos = data_file.ds.arr(pos, "code_length")
@@ -121,6 +126,6 @@
         return {'halos': data_file.header['num_halos']}
 
     def _identify_fields(self, data_file):
-        fields = [("halos", f) for f in halo_dt.fields if
+        fields = [("halos", f) for f in self._halo_dt.fields if
                   "padding" not in f]
         return fields, {}

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -344,21 +344,26 @@
                         for ptype in self._ptypes):
                 continue
             pos += 4
+            any_ptypes = False
             for ptype in self._ptypes:
                 if field == "Mass" and ptype not in self.var_mass:
                     continue
                 if (ptype, field) not in field_list:
                     continue
                 offsets[(ptype, field)] = pos
+                any_ptypes = True
                 if field in self._vector_fields:
                     pos += 3 * pcount[ptype] * fs
                 else:
                     pos += pcount[ptype] * fs
             pos += 4
+            if not any_ptypes: pos -= 8
         if file_size is not None:
             if file_size != pos:
                 mylog.warning("Your Gadget-2 file may have extra " +
-                              "columns or different precision!")
+                              "columns or different precision!" +
+                              " (%s file vs %s computed)",
+                              file_size, pos)
         return offsets
 
     def _identify_fields(self, domain):

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -758,7 +758,7 @@
 
 def big_patch_amr(ds_fn, fields, input_center="max", input_weight="density"):
     if not can_run_ds(ds_fn): return
-    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    dso = [ None, ("sphere", (input_center, (0.1, 'unitary')))]
     yield GridHierarchyTest(ds_fn)
     yield ParentageRelationshipsTest(ds_fn)
     for field in fields:

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -252,7 +252,7 @@
 
 axis_wcs = [[1,2],[0,2],[0,1]]
 
-def construct_image(data_source):
+def construct_image(data_source, center=None):
     ds = data_source.ds
     axis = data_source.axis
     if hasattr(ds, "wcs"):
@@ -266,11 +266,14 @@
     else:
         # This is some other kind of dataset
         unit = ds.get_smallest_appropriate_unit(ds.domain_width.max())
+        if center is None:
+            crval = [0.0,0.0]
+        else:
+            crval = [(ds.domain_center-center)[idx].in_units(unit) for idx in axis_wcs[axis]]
         dx = ds.index.get_smallest_dx()
         nx, ny = (ds.domain_width[axis_wcs[axis]]/dx).ndarray_view().astype("int")
         crpix = [0.5*(nx+1), 0.5*(ny+1)]
         cdelt = [dx.in_units(unit)]*2
-        crval = [ds.domain_center[idx].in_units(unit) for idx in axis_wcs[axis]]
         cunit = [unit]*2
         ctype = ["LINEAR"]*2
     frb = data_source.to_frb((1.0,"unitary"), (nx,ny))
@@ -295,7 +298,7 @@
     fields : string or list of strings
         The fields to slice
     center : A sequence of floats, a string, or a tuple.
-         The coordinate of the center of the image. If set to 'c', 'center' or
+         The coordinate of the origin of the image. If set to 'c', 'center' or
          left blank, the plot is centered on the middle of the domain. If set to
          'max' or 'm', the center will be located at the maximum of the
          ('gas', 'density') field. Units can be specified by passing in center
@@ -308,7 +311,7 @@
         axis = fix_axis(axis, ds)
         center = get_sanitized_center(center, ds)
         slc = ds.slice(axis, center[axis], **kwargs)
-        w, frb = construct_image(slc)
+        w, frb = construct_image(slc, center=center)
         super(FITSSlice, self).__init__(frb, fields=fields, wcs=w)
         for i, field in enumerate(fields):
             self[i].header["bunit"] = str(frb[field].units)
@@ -327,12 +330,21 @@
         The fields to project
     weight_field : string
         The field used to weight the projection.
+    center : A sequence of floats, a string, or a tuple.
+        The coordinate of the origin of the image. If set to 'c', 'center' or
+        left blank, the plot is centered on the middle of the domain. If set to
+        'max' or 'm', the center will be located at the maximum of the
+        ('gas', 'density') field. Units can be specified by passing in center
+        as a tuple containing a coordinate and string unit name or by passing
+        in a YTArray.  If a list or unitless array is supplied, code units are
+        assumed.
     """
-    def __init__(self, ds, axis, fields, weight_field=None, **kwargs):
+    def __init__(self, ds, axis, fields, center="c", weight_field=None, **kwargs):
         fields = ensure_list(fields)
         axis = fix_axis(axis, ds)
+        center = get_sanitized_center(center, ds)
         prj = ds.proj(fields[0], axis, weight_field=weight_field, **kwargs)
-        w, frb = construct_image(prj)
+        w, frb = construct_image(prj, center=center)
         super(FITSProjection, self).__init__(frb, fields=fields, wcs=w)
         for i, field in enumerate(fields):
             self[i].header["bunit"] = str(frb[field].units)

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r 31e7046ca0ef025163c4dab4db960628d8f41352 yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -10,7 +10,20 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+class NotAModule(object):
+    """
+    A class that raises an informative ImportError when someone tries to use
+    an on-demand import without having the requisite package installed.
+    """
+    def __init__(self, pkg_name):
+        self.pkg_name = pkg_name
+
+    def __getattr__(self, item):
+        raise ImportError("This functionality requires the %s package to be installed."
+                          % self.pkg_name)
+
 class astropy_imports:
+    _name = "astropy"
     _pyfits = None
     @property
     def pyfits(self):
@@ -19,7 +32,7 @@
                 import astropy.io.fits as pyfits
                 self.log
             except ImportError:
-                pyfits = None
+                pyfits = NotAModule(self._name)
             self._pyfits = pyfits
         return self._pyfits
 
@@ -31,7 +44,7 @@
                 import astropy.wcs as pywcs
                 self.log
             except ImportError:
-                pywcs = None
+                pywcs = NotAModule(self._name)
             self._pywcs = pywcs
         return self._pywcs
 
@@ -44,7 +57,7 @@
                 if log.exception_logging_enabled():
                     log.disable_exception_logging()
             except ImportError:
-                log = None
+                log = NotAModule(self._name)
             self._log = log
         return self._log
 
@@ -55,7 +68,7 @@
             try:
                 from astropy import units
             except ImportError:
-                units = None
+                units = NotAModule(self._name)
             self._units = units
         return self._units
 
@@ -67,8 +80,56 @@
                 import astropy.convolution as conv
                 self.log
             except ImportError:
-                conv = None
+                conv = NotAModule(self._name)
             self._conv = conv
         return self._conv
 
-_astropy = astropy_imports()
\ No newline at end of file
+_astropy = astropy_imports()
+
+class scipy_imports:
+    _name = "scipy"
+    _integrate = None
+    @property
+    def integrate(self):
+        if self._integrate is None:
+            try:
+                import scipy.integrate as integrate
+            except ImportError:
+                integrate = NotAModule(self._name)
+            self._integrate = integrate
+        return self._integrate
+
+    _stats = None
+    @property
+    def stats(self):
+        if self._stats is None:
+            try:
+                import scipy.stats as stats
+            except ImportError:
+                stats = NotAModule(self._name)
+            self._stats = stats
+        return self._stats
+
+    _optimize = None
+    @property
+    def optimize(self):
+        if self._optimize is None:
+            try:
+                import scipy.optimize as optimize
+            except ImportError:
+                optimize = NotAModule(self._name)
+            self._optimize = optimize
+        return self._optimize
+
+    _interpolate = None
+    @property
+    def interpolate(self):
+        if self._interpolate is None:
+            try:
+                import scipy.interpolate as interpolate
+            except ImportError:
+                interpolate = NotAModule(self._name)
+            self._interpolate = interpolate
+        return self._interpolate
+
+_scipy = scipy_imports()
\ No newline at end of file
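
The net effect of ``NotAModule`` is to move the ``ImportError`` from import
time to first use: importing yt always succeeds, and the error only surfaces
when missing functionality is actually touched.  A self-contained sketch of
the pattern:

.. code-block:: python

   class NotAModule(object):
       """Stand-in that raises a helpful ImportError on attribute access."""
       def __init__(self, pkg_name):
           self.pkg_name = pkg_name

       def __getattr__(self, item):
           raise ImportError("This functionality requires the %s package "
                             "to be installed." % self.pkg_name)

   try:
       import scipy.optimize as optimize
   except ImportError:
       optimize = NotAModule("scipy")

   # Raises only if scipy is absent *and* the module is actually used:
   # optimize.leastsq(func, x0)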


https://bitbucket.org/yt_analysis/yt/commits/be508def196f/
Changeset:   be508def196f
Branch:      yt
User:        atmyers
Date:        2014-08-08 22:47:56+00:00
Summary:     be more robust about reading in the simulation time for Chombo datasets
Affected #:  1 file

diff -r 2ad38426f294db9a65b138d5d4fd53f9cdc6a22b -r be508def196fd99293e3f4f2b0799c31782f5d3e yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -255,12 +255,17 @@
         if D == 3:
             self.dataset_type = 'chombo_hdf5'
 
-        # some datasets will not be time-dependent, make
+        # some datasets will not be time-dependent, and to make
+        # make matters worse the simulation time is not always
+        # stored in the same place in the hdf file! Make
         # sure we handle that here.
         try:
             self.current_time = self._handle.attrs['time']
         except KeyError:
-            self.current_time = 0.0
+            try: 
+                self.current_time = self._handle['level_0'].attrs['time']
+            except KeyError:
+                self.current_time = 0.0
 
         self.geometry = "cartesian"
         self.ini_filename = ini_filename
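
The nested try/except above can be read as a small fallback chain; a
self-contained sketch of the same lookup order with h5py (the two attribute
locations are the ones this commit knows about):

.. code-block:: python

   import h5py

   def read_current_time(path):
       # Fallback order: file-level attribute, then the level_0 group,
       # then default to 0.0 for non-time-dependent datasets.
       with h5py.File(path, "r") as f:
           try:
               return f.attrs["time"]
           except KeyError:
               try:
                   return f["level_0"].attrs["time"]
               except KeyError:
                   return 0.0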


https://bitbucket.org/yt_analysis/yt/commits/f58d61821879/
Changeset:   f58d61821879
Branch:      yt
User:        atmyers
Date:        2014-08-15 00:16:19+00:00
Summary:     merging
Affected #:  25 files

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -16,7 +16,7 @@
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
+BRANCH="yt" # This is the branch to which we will forcibly update.
 
 if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d705a81671d5692ed6691b3402115edbe9c98af815af5bb160ddf551bf02c76"
+  "signature": "sha256:427da1e1d02deb543246218dc8cce991268b518b25cfdd5944a4a436695f874b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -40,11 +40,13 @@
      "source": [
       "We will filter these into young stars and old stars by masking on the ('Stars', 'creation_time') field. \n",
       "\n",
-      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The second argument is a yt data container and is usually the only one used in a filter definition.\n",
+      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The first argument is a `ParticleFilter` object that contains metadata about the filter its self.  The second argument is a yt data container.\n",
       "\n",
-      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages.\n",
+      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages. \n",
       "\n",
-      "Old stars either formed dynamically in the simulation (ages greater than 5 Myr) or were present in the initial conditions (negative ages)."
+      "Conversely, let's define \"old\" stars as those stars formed dynamically in the simulation with ages greater than 5 Myr.  We also include stars with negative ages, since these stars were included in the simulation initial conditions.\n",
+      "\n",
+      "We make use of `pfilter.filtered_type` so that the filter definition will use the same particle type as the one specified in the call to `add_particle_filter` below.  This makes the filter definition usable for arbitrary particle types.  Since we're only filtering the `\"Stars\"` particle type in this example, we could have also replaced `pfilter.filtered_type` with `\"Stars\"` and gotten the same result."
      ]
     },
     {
@@ -52,12 +54,12 @@
      "collapsed": false,
      "input": [
       "def young_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_and(age.in_units('Myr') <= 5, age >= 0)\n",
       "    return filter\n",
       "\n",
       "def old_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_or(age.in_units('Myr') >= 5, age < 0)\n",
       "    return filter"
      ],
@@ -140,4 +142,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -179,6 +179,38 @@
      fields or that get aliased to themselves, we can specify a different
      desired output unit than the unit found on disk.
 
+Debugging a Derived Field
+-------------------------
+
+If your derived field is not behaving as you would like, you can insert a call
+to ``data._debug()`` to spawn an interactive interpreter whenever that line is
+reached.  Note that this is slightly different from calling
+``pdb.set_trace()``, as it will *only* trigger when the derived field is being
+called on an actual data object, rather than during the field detection phase.
+The starting position will be one function lower in the stack than the one you
+are likely interested in, but you can either step back to the derived field
+function or simply type ``u`` to go up a level in the stack.
+
+For instance, if you had defined this derived field:
+
+.. code-block:: python
+
+   @yt.derived_field(name = "funthings")
+   def funthings(field, data):
+       return data["sillythings"] + data["humorousthings"]**2.0
+
+And you wanted to debug it, you could do:
+
+.. code-block:: python
+
+   @yt.derived_field(name = "funthings")
+   def funthings(field, data):
+       data._debug()
+       return data["sillythings"] + data["humorousthings"]**2.0
+
+And now, when that derived field is actually used, you will be placed into a
+debugger.
+
 Units for Cosmological Datasets
 -------------------------------
 

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -20,7 +20,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | ARTIO                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     N      |   Full   |
+| Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1,8 +1,10 @@
-from scipy import optimize
-import numpy as na
+import numpy as np
 import h5py
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
+from yt.utilities.on_demand_imports import _scipy
+
+optimize = _scipy.optimize
 
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
         minError=1E-4, complexLim=.995,
@@ -83,7 +85,7 @@
     x0,xRes=x[0],x[1]-x[0]
 
     #Empty fit without any lines
-    yFit = na.ones(len(fluxData))
+    yFit = np.ones(len(fluxData))
 
     #Force the first and last flux pixel to be 1 to prevent OOB
     fluxData[0]=1
@@ -98,10 +100,10 @@
     #Fit all species one at a time in given order from low to high wavelength
     for species in orderFits:
         speciesDict = speciesDicts[species]
-        speciesLines = {'N':na.array([]),
-                        'b':na.array([]),
-                        'z':na.array([]),
-                        'group#':na.array([])}
+        speciesLines = {'N':np.array([]),
+                        'b':np.array([]),
+                        'z':np.array([]),
+                        'group#':np.array([])}
 
         #Set up wavelengths for species
         initWl = speciesDict['wavelength'][0]
@@ -131,7 +133,7 @@
                         yFitBounded,z,speciesDict,
                         minSize,minError)
 
-            if na.size(newLinesP)> 0:
+            if np.size(newLinesP)> 0:
 
                 #Check for EXPLOOOOSIIONNNSSS
                 newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
@@ -150,12 +152,12 @@
 
 
             #Add new group to all fitted lines
-            if na.size(newLinesP)>0:
-                speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
-                speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
-                speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
-                groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
-                speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+            if np.size(newLinesP)>0:
+                speciesLines['N']=np.append(speciesLines['N'],newLinesP[:,0])
+                speciesLines['b']=np.append(speciesLines['b'],newLinesP[:,1])
+                speciesLines['z']=np.append(speciesLines['z'],newLinesP[:,2])
+                groupNums = b_i*np.ones(np.size(newLinesP[:,0]))
+                speciesLines['group#']=np.append(speciesLines['group#'],groupNums)
 
         allSpeciesLines[species]=speciesLines
 
@@ -226,7 +228,7 @@
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
         initP[2]=initz
-        initP=na.array([initP])
+        initP=np.array([initP])
 
     linesP = initP
 
@@ -259,7 +261,7 @@
 
 
         #Set results of optimization
-        linesP = na.reshape(fitP,(-1,3))
+        linesP = np.reshape(fitP,(-1,3))
 
         #Generate difference between current best fit and data
         yNewFit=_gen_flux_lines(x,linesP,speciesDict)
@@ -288,7 +290,7 @@
                 break
 
         #If too many lines 
-        if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+        if np.shape(linesP)[0]>8 or np.size(linesP)+3>=len(x):
             #If it's fittable by flag tools and still bad, use flag tools
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
@@ -315,17 +317,17 @@
             newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
-        linesP=na.append(linesP,[newP],axis=0)
+        linesP=np.append(linesP,[newP],axis=0)
 
 
     #Check the parameters of all lines to see if they fall in an
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict,x)
+        check=_check_params(np.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
-    linesP = na.delete(linesP,remove,axis=0)
+    linesP = np.delete(linesP,remove,axis=0)
 
     return linesP,flag
 
@@ -377,7 +379,7 @@
     #Iterate through test line guesses
     for initLines in lineTests:
         if initLines[1,0]==0:
-            initLines = na.delete(initLines,1,axis=0)
+            initLines = np.delete(initLines,1,axis=0)
 
         #Do fitting with initLines as first guess
         linesP,flag=_complex_fit(x,yDat,yFit,initz,
@@ -421,7 +423,7 @@
     """
 
     #Set up a bunch of empty lines
-    testP = na.zeros((10,2,3))
+    testP = np.zeros((10,2,3))
 
     testP[0,0,:]=[1E18,20,initz]
     testP[1,0,:]=[1E18,40,initz]
@@ -542,7 +544,7 @@
                 errBound = 10*errBound*len(yb)
 
             #Generate a fit and find the difference to data
-            yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+            yFitb=_gen_flux_lines(xb,np.array([p]),speciesDict)
             dif =yb-yFitb
 
 
@@ -557,7 +559,7 @@
                 break
 
     #Remove all bad line fits
-    linesP = na.delete(linesP,removeLines,axis=0)
+    linesP = np.delete(linesP,removeLines,axis=0)
 
     return linesP 
 
@@ -755,7 +757,7 @@
             if firstLine: 
                 break
 
-    flux = na.exp(-y)
+    flux = np.exp(-y)
     return flux
 
 def _gen_tau(t, p, f, Gamma, lambda_unshifted):
@@ -768,7 +770,7 @@
     a=7.95774715459E-15*Gamma*lambda_unshifted/b
     x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
     
-    H = na.zeros(len(x))
+    H = np.zeros(len(x))
     H = voigt(a,x)
     
     tau = tau_o*H
@@ -910,9 +912,9 @@
 
             # Make the final line parameters. It's annoying because
             # one or both regions may have fit to nothing
-            if na.size(p1)> 0 and na.size(p2)>0:
-                p = na.r_[p1,p2]
-            elif na.size(p1) > 0:
+            if np.size(p1)> 0 and np.size(p2)>0:
+                p = np.r_[p1,p2]
+            elif np.size(p1) > 0:
                 p = p1
             else:
                 p = p2
@@ -952,7 +954,7 @@
             # max and min to prevent boundary errors
 
             flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
-            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+            flux = np.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
 
             #Find regions that are absorbing outside the region we fit
             flux_dif = 1 - flux
@@ -971,7 +973,7 @@
                 remove_lines.append(i)
     
     if remove_lines:
-        p = na.delete(p, remove_lines, axis=0)
+        p = np.delete(p, remove_lines, axis=0)
 
     return p
 

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -225,13 +225,13 @@
 
     # accumulate, if necessary
     if accumulation:
-        used = my_profile.used        
+        used = my_profile.used
         for field in my_profile.field_data:
             if weight_field is None:
                 my_profile.field_data[field][used] = \
                     np.cumsum(my_profile.field_data[field][used])
             else:
-                my_weight = my_profile.weight[:, 0]
+                my_weight = my_profile.weight
                 my_profile.field_data[field][used] = \
                   np.cumsum(my_profile.field_data[field][used] * my_weight[used]) / \
                   np.cumsum(my_weight[used])

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -27,7 +27,7 @@
 from yt.extern.six import add_metaclass
 
 from yt.config import ytcfg
-from yt.funcs import mylog
+from yt.funcs import mylog, ensure_dir_exists
 from yt.utilities.performance_counters import \
     time_function, \
     yt_counters

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -233,7 +233,6 @@
             fi += 1
         pi += npart
     num_p[0] = local_parts
-    del ds._instantiated_hierarchy
     del ds
 
 cdef class RockstarInterface:

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -22,14 +22,10 @@
 except ImportError:
     pass
 
-try:
-    import xspec
-    from scipy.integrate import cumtrapz
-    from scipy import stats        
-except ImportError:
-    pass
-from yt.utilities.on_demand_imports import _astropy
+from yt.utilities.on_demand_imports import _astropy, _scipy
+
 pyfits = _astropy.pyfits
+stats = _scipy.stats
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
@@ -212,11 +208,14 @@
         try:
             self.line_handle = pyfits.open(self.linefile)
         except IOError:
-            mylog.error("LINE file %s does not exist" % (self.linefile))
+            mylog.error("LINE file %s does not exist" % self.linefile)
+            raise IOError("LINE file %s does not exist" % self.linefile)
         try:
             self.coco_handle = pyfits.open(self.cocofile)
         except IOError:
-            mylog.error("COCO file %s does not exist" % (self.cocofile))
+            mylog.error("COCO file %s does not exist" % self.cocofile)
+            raise IOError("COCO file %s does not exist" % self.cocofile)
+
         self.Tvals = self.line_handle[1].data.field("kT")
         self.dTvals = np.diff(self.Tvals)
         self.minlam = self.wvbins.min()
@@ -224,18 +223,18 @@
     
     def _make_spectrum(self, element, tindex):
         
-        tmpspec = np.zeros((self.nchan))
+        tmpspec = np.zeros(self.nchan)
         
         i = np.where((self.line_handle[tindex].data.field('element') == element) &
                      (self.line_handle[tindex].data.field('lambda') > self.minlam) &
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
-        vec = np.zeros((self.nchan))
+        vec = np.zeros(self.nchan)
         E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.ndarray_view()
         if self.thermal_broad:
-            vec = np.zeros((self.nchan))
+            vec = np.zeros(self.nchan)
             sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value
             for E, sig, a in zip(E0, sigma, amp):
                 cdf = stats.norm(E,sig).cdf(ebins)
@@ -270,10 +269,10 @@
         """
         Get the thermal emission spectrum given a temperature *kT* in keV. 
         """
-        cspec_l = np.zeros((self.nchan))
-        mspec_l = np.zeros((self.nchan))
-        cspec_r = np.zeros((self.nchan))
-        mspec_r = np.zeros((self.nchan))
+        cspec_l = np.zeros(self.nchan)
+        mspec_l = np.zeros(self.nchan)
+        cspec_r = np.zeros(self.nchan)
+        mspec_r = np.zeros(self.nchan)
         tindex = np.searchsorted(self.Tvals, kT)-1
         if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
             return cspec_l*cm3/units.s, mspec_l*cm3/units.s

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -336,8 +336,8 @@
                                   registry=self.ds.unit_registry)
             if self.weight_field is None and not self._sum_only:
                 u_obj = Unit(units, registry=self.ds.unit_registry)
-                if (u_obj.is_code_unit and not u_obj.is_dimensionless) and \
-                  input_units != units or self.ds.no_cgs_equiv_length:
+                if ((u_obj.is_code_unit or self.ds.no_cgs_equiv_length) and
+                    not u_obj.is_dimensionless) and input_units != units:
                     final_unit = "(%s) * code_length" % units
                     self[field].convert_to_units(final_unit)
         for i in data.keys(): self[i] = data.pop(i)

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -129,6 +129,15 @@
         self._index = self.ds.index
         return self._index
 
+    def _debug(self):
+        """
+        When called from within a derived field, this will run pdb.  However,
+        during field detection, it will not.  This allows you to more easily
+        debug fields that are being called on actual objects.
+        """
+        import pdb
+        pdb.set_trace()
+
     def _set_default_field_parameters(self):
         self.field_parameters = {}
         for k,v in self._default_field_parameters.items():

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -148,6 +148,10 @@
             self[item] = self._read_data(item)
         return self[item]
 
+    def _debug(self):
+        # We allow this to pass through.
+        return
+
     def deposit(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))
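
Taken together with the data_containers.py hunk above, _debug is a no-op on the
FieldDetector but drops into pdb on real data objects. A minimal usage sketch
(the field name and units here are illustrative, not part of the commit):

    import yt

    def _suspect_field(field, data):
        data._debug()  # no-op during field detection; pdb on actual objects
        return data["gas", "density"] ** 2

    yt.add_field(("gas", "suspect"), function=_suspect_field,
                 units="g**2/cm**6")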
 

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -107,7 +107,7 @@
         self.directory = os.path.dirname(self.dataset.filename)
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
-        self.index_filename = self.dataset.filename
+        self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
         #self.directory = os.path.dirname(self.index_filename)
         self._fhandle = file(self.index_filename,'rb')
         GridIndex.__init__(self, ds, dataset_type)
@@ -366,7 +366,7 @@
         # Unfortunately we now have to mandate that the index gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.
-        self.h
+        self.index
 
     def _set_code_unit_attributes(self):
         """
@@ -458,14 +458,13 @@
             self.hubble_constant = self.cosmological_simulation = 0.0
         self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
-        if self.specified_parameters.has_key("gamma") :
+        if self.specified_parameters.has_key("gamma"):
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
-        else :
+        else:
             self.parameters["Gamma"] = 5./3. 
         self.geometry = self.specified_parameters.get("geometry", "cartesian")
         self._handle.close()
 
-
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -25,6 +25,11 @@
 erg_units = "code_mass * (code_length/code_time)**2"
 rho_units = "code_mass / code_length**3"
 
+def velocity_field(comp):
+    def _velocity(field, data):
+        return data["athena", "momentum_%s" % comp]/data["athena","density"]
+    return _velocity
+
 class AthenaFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", ("code_mass/code_length**3", ["density"], None)),
@@ -41,19 +46,17 @@
     def setup_fluid_fields(self):
         # Add velocity fields
         for comp in "xyz":
-            vel_field = ("athena", "velocity_%s" % (comp))
-            mom_field = ("athena", "momentum_%s" % (comp))
+            vel_field = ("athena", "velocity_%s" % comp)
+            mom_field = ("athena", "momentum_%s" % comp)
             if vel_field in self.field_list:
                 self.add_output_field(vel_field, units="code_length/code_time")
-                self.alias(("gas","velocity_%s" % (comp)), vel_field,
+                self.alias(("gas","velocity_%s" % comp), vel_field,
                            units="cm/s")
             elif mom_field in self.field_list:
                 self.add_output_field(mom_field,
-                                      units="code_mass*code_length/code_time")
-                f = lambda data: data["athena","momentum_%s" % (comp)] / \
-                                 data["athena","density"]
-                self.add_field(("gas","velocity_%s" % (comp)),
-                               function=f, units = "cm/s")
+                                      units="code_mass/code_time/code_length**2")
+                self.add_field(("gas","velocity_%s" % comp),
+                               function=velocity_field(comp), units = "cm/s")
         # Add pressure, energy, and temperature fields
         def ekin1(data):
             return 0.5*(data["athena","momentum_x"]**2 +
@@ -96,6 +99,8 @@
                            function=_total_energy,
                            units="erg/g")
         elif ("athena","total_energy") in self.field_list:
+            self.add_output_field(("athena","total_energy"),
+                                  units=pres_units)
             def _pressure(field, data):
                 return eint_from_etot(data)*(data.ds.gamma-1.0)
             self.add_field(("gas","pressure"), function=_pressure,

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/frontends/athena/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -0,0 +1,59 @@
+"""
+Athena frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.athena.api import AthenaDataset
+
+_fields_cloud = ("scalar[0]", "density", "total_energy")
+
+cloud = "ShockCloud/id0/Cloud.0050.vtk"
+@requires_ds(cloud)
+def test_cloud():
+    ds = data_dir_load(cloud)
+    yield assert_equal, str(ds), "Cloud.0050"
+    for test in small_patch_amr(cloud, _fields_cloud):
+        test_cloud.__name__ = test.description
+        yield test
+
+_fields_blast = ("temperature", "density", "velocity_magnitude")
+
+blast = "MHDBlast/id0/Blast.0100.vtk"
+@requires_ds(blast)
+def test_blast():
+    ds = data_dir_load(blast)
+    yield assert_equal, str(ds), "Blast.0100"
+    for test in small_patch_amr(blast, _fields_blast):
+        test_blast.__name__ = test.description
+        yield test
+
+parameters_stripping = {"time_unit":3.086e14,
+                        "length_unit":8.0236e22,
+                        "mass_unit":9.999e-30*8.0236e22**3}
+
+_fields_stripping = ("temperature", "density", "specific_scalar[0]")
+
+stripping = "RamPressureStripping/id0/rps.0062.vtk"
+@requires_ds(stripping, big_data=True)
+def test_stripping():
+    ds = data_dir_load(stripping, kwargs={"parameters":parameters_stripping})
+    yield assert_equal, str(ds), "rps.0062"
+    for test in small_patch_amr(stripping, _fields_stripping):
+        test_stripping.__name__ = test.description
+        yield test

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/frontends/halo_catalogs/rockstar/definitions.py
--- a/yt/frontends/halo_catalogs/rockstar/definitions.py
+++ b/yt/frontends/halo_catalogs/rockstar/definitions.py
@@ -35,17 +35,25 @@
     ("unused", BINARY_HEADER_SIZE - 4*12 - 4 - 8*6 - 12, "c")
 )
 
-halo_dt = np.dtype([
+# Note the final field here, which is a field for min/max format revision in
+# which the field appears.
+
+KNOWN_REVISIONS=[0, 1]
+
+halo_dt = [
     ('particle_identifier', np.int64),
     ('particle_position_x', np.float32),
     ('particle_position_y', np.float32),
     ('particle_position_z', np.float32),
+    ('particle_mposition_x', np.float32, (0, 0)),
+    ('particle_mposition_y', np.float32, (0, 0)),
+    ('particle_mposition_z', np.float32, (0, 0)),
     ('particle_velocity_x', np.float32),
     ('particle_velocity_y', np.float32),
     ('particle_velocity_z', np.float32),
-    ('particle_corevel_x', np.float32),
-    ('particle_corevel_y', np.float32),
-    ('particle_corevel_z', np.float32),
+    ('particle_corevel_x', np.float32, (1, 100)),
+    ('particle_corevel_y', np.float32, (1, 100)),
+    ('particle_corevel_z', np.float32, (1, 100)),
     ('particle_bulkvel_x', np.float32),
     ('particle_bulkvel_y', np.float32),
     ('particle_bulkvel_z', np.float32),
@@ -75,15 +83,15 @@
     ('Ax', np.float32),
     ('Ay', np.float32),
     ('Az', np.float32),
-    ('b_to_a2', np.float32),
-    ('c_to_a2', np.float32),
-    ('A2x', np.float32),
-    ('A2y', np.float32),
-    ('A2z', np.float32),
+    ('b_to_a2', np.float32, (1, 100)),
+    ('c_to_a2', np.float32, (1, 100)),
+    ('A2x', np.float32, (1, 100)),
+    ('A2y', np.float32, (1, 100)),
+    ('A2z', np.float32, (1, 100)),
     ('bullock_spin', np.float32),
     ('kin_to_pot', np.float32),
-    ('m_pe_b', np.float32),
-    ('m_pe_d', np.float32),
+    ('m_pe_b', np.float32, (1, 100)),
+    ('m_pe_d', np.float32, (1, 100)),
     ('num_p', np.int64),
     ('num_child_particles', np.int64),
     ('p_start', np.int64),
@@ -93,7 +101,20 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
-], align=True)
+]
+
+halo_dts = {}
+
+for rev in KNOWN_REVISIONS:
+    halo_dts[rev] = []
+    for item in halo_dt:
+        if len(item) == 2:
+            halo_dts[rev].append(item)
+        else:
+            mi, ma = item[2]
+            if (mi <= rev) and (rev <= ma):
+                halo_dts[rev].append(item[:2])
+    halo_dts[rev] = np.dtype(halo_dts[rev], align=True)
 
 particle_dt = np.dtype([
     ('particle_identifier', np.int64),
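
The loop above expands the annotated field list into one NumPy dtype per known
format revision: two-element entries are unconditional, and three-element entries
carry an inclusive (min, max) revision range. A minimal sketch of the same
filtering rule with illustrative field names:

    import numpy as np

    fields = [
        ('a', np.float32),            # present in every revision
        ('b', np.float32, (0, 0)),    # revision 0 only
        ('c', np.float32, (1, 100)),  # revision 1 and later
    ]
    dts = {}
    for rev in (0, 1):
        kept = [f[:2] for f in fields
                if len(f) == 2 or f[2][0] <= rev <= f[2][1]]
        dts[rev] = np.dtype(kept, align=True)

    print(dts[0].names)  # ('a', 'b')
    print(dts[1].names)  # ('a', 'c')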

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -24,7 +24,7 @@
     BaseIOHandler
 
 import yt.utilities.fortran_utils as fpu
-from .definitions import halo_dt
+from .definitions import halo_dts
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
@@ -32,6 +32,10 @@
 class IOHandlerRockstarBinary(BaseIOHandler):
     _dataset_type = "rockstar_binary"
 
+    def __init__(self, *args, **kwargs):
+        super(IOHandlerRockstarBinary, self).__init__(*args, **kwargs)
+        self._halo_dt = halo_dts[self.ds.parameters['format_revision']]
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
@@ -45,11 +49,12 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
+        
         for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
-                halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+                halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
                 x = halos['particle_position_x'].astype("float64")
                 y = halos['particle_position_y'].astype("float64")
                 z = halos['particle_position_z'].astype("float64")
@@ -70,7 +75,7 @@
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     f.seek(data_file._position_offset, os.SEEK_SET)
-                    halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+                    halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
                     x = halos['particle_position_x'].astype("float64")
                     y = halos['particle_position_y'].astype("float64")
                     z = halos['particle_position_z'].astype("float64")
@@ -89,7 +94,7 @@
         ind = 0
         with open(data_file.filename, "rb") as f:
             f.seek(data_file._position_offset, os.SEEK_SET)
-            halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+            halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
             pos = np.empty((halos.size, 3), dtype="float64")
             # These positions are in Mpc, *not* "code" units
             pos = data_file.ds.arr(pos, "code_length")
@@ -121,6 +126,6 @@
         return {'halos': data_file.header['num_halos']}
 
     def _identify_fields(self, data_file):
-        fields = [("halos", f) for f in halo_dt.fields if
+        fields = [("halos", f) for f in self._halo_dt.fields if
                   "padding" not in f]
         return fields, {}

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -344,21 +344,26 @@
                         for ptype in self._ptypes):
                 continue
             pos += 4
+            any_ptypes = False
             for ptype in self._ptypes:
                 if field == "Mass" and ptype not in self.var_mass:
                     continue
                 if (ptype, field) not in field_list:
                     continue
                 offsets[(ptype, field)] = pos
+                any_ptypes = True
                 if field in self._vector_fields:
                     pos += 3 * pcount[ptype] * fs
                 else:
                     pos += pcount[ptype] * fs
             pos += 4
+            if not any_ptypes: pos -= 8
         if file_size is not None:
             if file_size != pos:
                 mylog.warning("Your Gadget-2 file may have extra " +
-                              "columns or different precision!")
+                              "columns or different precision!" +
+                              " (%s file vs %s computed)",
+                              file_size, pos)
         return offsets
 
     def _identify_fields(self, domain):
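
On the pos -= 8 fix: each Gadget-2 block is bracketed by two 4-byte record
markers, and the scan adds both up front; if none of the requested particle types
store data in a block, the block is not actually present on disk and both markers
have to be backed out to keep the running offset aligned. A rough sketch of the
bookkeeping only, under that assumption (not a reader for real Gadget files):

    blocks = [
        {("Gas", "Density"): 400},  # block holding data for a requested ptype
        {},                         # block none of our ptypes appear in
    ]
    offsets = {}
    pos = 0
    for block in blocks:
        pos += 4                    # leading record marker
        any_ptypes = False
        for key, nbytes in block.items():
            offsets[key] = pos
            any_ptypes = True
            pos += nbytes
        pos += 4                    # trailing record marker
        if not any_ptypes:
            pos -= 8                # absent block: undo both markers
    print(offsets, pos)             # {('Gas', 'Density'): 4} 408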

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -758,7 +758,7 @@
 
 def big_patch_amr(ds_fn, fields, input_center="max", input_weight="density"):
     if not can_run_ds(ds_fn): return
-    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    dso = [ None, ("sphere", (input_center, (0.1, 'unitary')))]
     yield GridHierarchyTest(ds_fn)
     yield ParentageRelationshipsTest(ds_fn)
     for field in fields:

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -252,7 +252,7 @@
 
 axis_wcs = [[1,2],[0,2],[0,1]]
 
-def construct_image(data_source):
+def construct_image(data_source, center=None):
     ds = data_source.ds
     axis = data_source.axis
     if hasattr(ds, "wcs"):
@@ -266,11 +266,14 @@
     else:
         # This is some other kind of dataset
         unit = ds.get_smallest_appropriate_unit(ds.domain_width.max())
+        if center is None:
+            crval = [0.0,0.0]
+        else:
+            crval = [(ds.domain_center-center)[idx].in_units(unit) for idx in axis_wcs[axis]]
         dx = ds.index.get_smallest_dx()
         nx, ny = (ds.domain_width[axis_wcs[axis]]/dx).ndarray_view().astype("int")
         crpix = [0.5*(nx+1), 0.5*(ny+1)]
         cdelt = [dx.in_units(unit)]*2
-        crval = [ds.domain_center[idx].in_units(unit) for idx in axis_wcs[axis]]
         cunit = [unit]*2
         ctype = ["LINEAR"]*2
     frb = data_source.to_frb((1.0,"unitary"), (nx,ny))
@@ -295,7 +298,7 @@
     fields : string or list of strings
         The fields to slice
     center : A sequence of floats, a string, or a tuple.
-         The coordinate of the center of the image. If set to 'c', 'center' or
+         The coordinate of the origin of the image. If set to 'c', 'center' or
          left blank, the plot is centered on the middle of the domain. If set to
          'max' or 'm', the center will be located at the maximum of the
          ('gas', 'density') field. Units can be specified by passing in center
@@ -308,7 +311,7 @@
         axis = fix_axis(axis, ds)
         center = get_sanitized_center(center, ds)
         slc = ds.slice(axis, center[axis], **kwargs)
-        w, frb = construct_image(slc)
+        w, frb = construct_image(slc, center=center)
         super(FITSSlice, self).__init__(frb, fields=fields, wcs=w)
         for i, field in enumerate(fields):
             self[i].header["bunit"] = str(frb[field].units)
@@ -327,12 +330,21 @@
         The fields to project
     weight_field : string
         The field used to weight the projection.
+    center : A sequence of floats, a string, or a tuple.
+        The coordinate of the origin of the image. If set to 'c', 'center' or
+        left blank, the plot is centered on the middle of the domain. If set to
+        'max' or 'm', the center will be located at the maximum of the
+        ('gas', 'density') field. Units can be specified by passing in center
+        as a tuple containing a coordinate and string unit name or by passing
+        in a YTArray.  If a list or unitless array is supplied, code units are
+        assumed.
     """
-    def __init__(self, ds, axis, fields, weight_field=None, **kwargs):
+    def __init__(self, ds, axis, fields, center="c", weight_field=None, **kwargs):
         fields = ensure_list(fields)
         axis = fix_axis(axis, ds)
+        center = get_sanitized_center(center, ds)
         prj = ds.proj(fields[0], axis, weight_field=weight_field, **kwargs)
-        w, frb = construct_image(prj)
+        w, frb = construct_image(prj, center=center)
         super(FITSProjection, self).__init__(frb, fields=fields, wcs=w)
         for i, field in enumerate(fields):
             self[i].header["bunit"] = str(frb[field].units)

diff -r be508def196fd99293e3f4f2b0799c31782f5d3e -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -10,7 +10,20 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+class NotAModule(object):
+    """
+    A class to implement an informative error message that is raised if someone
+    tries to use an on-demand import without having the requisite package installed.
+    """
+    def __init__(self, pkg_name):
+        self.pkg_name = pkg_name
+
+    def __getattr__(self, item):
+        raise ImportError("This functionality requires the %s package to be installed."
+                          % self.pkg_name)
+
 class astropy_imports:
+    _name = "astropy"
     _pyfits = None
     @property
     def pyfits(self):
@@ -19,7 +32,7 @@
                 import astropy.io.fits as pyfits
                 self.log
             except ImportError:
-                pyfits = None
+                pyfits = NotAModule(self._name)
             self._pyfits = pyfits
         return self._pyfits
 
@@ -31,7 +44,7 @@
                 import astropy.wcs as pywcs
                 self.log
             except ImportError:
-                pywcs = None
+                pywcs = NotAModule(self._name)
             self._pywcs = pywcs
         return self._pywcs
 
@@ -44,7 +57,7 @@
                 if log.exception_logging_enabled():
                     log.disable_exception_logging()
             except ImportError:
-                log = None
+                log = NotAModule(self._name)
             self._log = log
         return self._log
 
@@ -55,7 +68,7 @@
             try:
                 from astropy import units
             except ImportError:
-                units = None
+                units = NotAModule(self._name)
             self._units = units
         return self._units
 
@@ -67,8 +80,56 @@
                 import astropy.convolution as conv
                 self.log
             except ImportError:
-                conv = None
+                conv = NotAModule(self._name)
             self._conv = conv
         return self._conv
 
-_astropy = astropy_imports()
\ No newline at end of file
+_astropy = astropy_imports()
+
+class scipy_imports:
+    _name = "scipy"
+    _integrate = None
+    @property
+    def integrate(self):
+        if self._integrate is None:
+            try:
+                import scipy.integrate as integrate
+            except ImportError:
+                integrate = NotAModule(self._name)
+            self._integrate = integrate
+        return self._integrate
+
+    _stats = None
+    @property
+    def stats(self):
+        if self._stats is None:
+            try:
+                import scipy.stats as stats
+            except ImportError:
+                stats = NotAModule(self._name)
+            self._stats = stats
+        return self._stats
+
+    _optimize = None
+    @property
+    def optimize(self):
+        if self._optimize is None:
+            try:
+                import scipy.optimize as optimize
+            except ImportError:
+                optimize = NotAModule(self._name)
+            self._optimize = optimize
+        return self._optimize
+
+    _interpolate = None
+    @property
+    def interpolate(self):
+        if self._interpolate is None:
+            try:
+                import scipy.interpolate as interpolate
+            except ImportError:
+                interpolate = NotAModule(self._name)
+            self._interpolate = interpolate
+        return self._interpolate
+
+_scipy = scipy_imports()
\ No newline at end of file
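
A minimal usage sketch of the deferred-import pattern above: the first attribute
access on the proxy triggers the import, and when scipy is missing the NotAModule
placeholder turns any later attribute lookup into an informative ImportError
rather than an AttributeError on None:

    from yt.utilities.on_demand_imports import _scipy

    stats = _scipy.stats       # scipy.stats is imported here, on first access
    rv = stats.norm(0.0, 1.0)  # works if scipy is installed; otherwise raises
    # ImportError: This functionality requires the scipy package to be installed.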


https://bitbucket.org/yt_analysis/yt/commits/bfa233572c1d/
Changeset:   bfa233572c1d
Branch:      yt
User:        atmyers
Date:        2014-08-22 21:43:01+00:00
Summary:     fixing a docstring typo
Affected #:  1 file

diff -r f58d61821879b4e8e065e5dbf7cc181ebae88e1c -r bfa233572c1d1dd8fb85e4b7190acc5795ebd5e8 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -955,7 +955,7 @@
     x_min : float
         The minimum value of the x profile field.
     x_max : float
-        The maximum value of hte x profile field.
+        The maximum value of the x profile field.
     x_log : boolean
         Controls whether or not the bins for the x field are evenly
         spaced in linear (False) or log (True) space.


https://bitbucket.org/yt_analysis/yt/commits/85f0fe1d0544/
Changeset:   85f0fe1d0544
Branch:      yt
User:        atmyers
Date:        2014-08-22 21:59:05+00:00
Summary:     reading periodicity information from Chombo datasets
Affected #:  1 file

diff -r bfa233572c1d1dd8fb85e4b7190acc5795ebd5e8 -r 85f0fe1d0544467618b8d0577e44850ed33074eb yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -311,6 +311,9 @@
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
         
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self._determine_periodic()
+
+    def _determine_periodic(self):
         self.periodicity = (True, True, True)
 
     def _calc_left_edge(self):
@@ -434,7 +437,7 @@
         self.domain_right_edge = self._calc_right_edge()
         self.domain_dimensions = self._calc_domain_dimensions()
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True, True, True)
+        self._determine_periodic()
 
     def _parse_inputs_file(self, ini_filename):
         self.fullplotdir = os.path.abspath(self.parameter_filename)
@@ -503,6 +506,15 @@
         if self.dimensionality == 2:
             self._field_info_class = ChomboPICFieldInfo2D
 
+    def _determine_periodic(self):
+        is_periodic = np.array([True, True, True])
+        for dir in [0, 1, 2]:
+            try:
+                is_periodic[dir] = self._handle['/level_0'].attrs['is_periodic_%d' % dir]
+            except KeyError:
+                is_periodic[dir] = True
+        self.periodicity = tuple(is_periodic)
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
 


https://bitbucket.org/yt_analysis/yt/commits/6189126b61e5/
Changeset:   6189126b61e5
Branch:      yt
User:        atmyers
Date:        2014-08-22 23:23:56+00:00
Summary:     making the ChomboPIC particle fields default to take_log=False
Affected #:  1 file

diff -r 85f0fe1d0544467618b8d0577e44850ed33074eb -r 6189126b61e591c97180d3ce07f22bb6f3e49ad4 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -14,8 +14,13 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+from yt.units.unit_object import Unit
 from yt.fields.field_info_container import \
-    FieldInfoContainer
+    FieldInfoContainer, \
+    particle_deposition_functions, \
+    particle_vector_functions, \
+    standard_particle_fields
+
 from yt.frontends.boxlib.fields import \
     rho_units, \
     mom_units, \
@@ -107,6 +112,62 @@
         ("particle_velocity_z", ("code_length / code_time", [], None)),
     )
 
+    def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
+        skip_output_units = ("code_length",)
+        for f, (units, aliases, dn) in sorted(self.known_particle_fields):
+            units = self.ds.field_units.get((ptype, f), units)
+            if (f in aliases or ptype not in self.ds.particle_types_raw) and \
+                units not in skip_output_units:
+                u = Unit(units, registry = self.ds.unit_registry)
+                output_units = str(u.get_cgs_equivalent())
+            else:
+                output_units = units
+            if (ptype, f) not in self.field_list:
+                continue
+            self.add_output_field((ptype, f),
+                units = units, particle_type = True,
+                display_name = dn, output_units = output_units, take_log=False)
+            for alias in aliases:
+                self.alias((ptype, alias), (ptype, f), units = output_units)
+
+        # We'll either have particle_position or particle_position_[xyz]
+        if (ptype, "particle_position") in self.field_list or \
+           (ptype, "particle_position") in self.field_aliases:
+            particle_scalar_functions(ptype,
+                   "particle_position", "particle_velocity",
+                   self)
+        else:
+            # We need to check to make sure that there's a "known field" that
+            # overlaps with one of the vector fields.  For instance, if we are
+            # in the Stream frontend, and we have a set of scalar position
+            # fields, they will overlap with -- and be overridden by -- the
+            # "known" vector field that the frontend creates.  So the easiest
+            # thing to do is to simply remove the on-disk field (which doesn't
+            # exist) and replace it with a derived field.
+            if (ptype, "particle_position") in self and \
+                 self[ptype, "particle_position"]._function == NullFunc:
+                self.pop((ptype, "particle_position"))
+            particle_vector_functions(ptype,
+                    ["particle_position_%s" % ax for ax in 'xyz'],
+                    ["particle_velocity_%s" % ax for ax in 'xyz'],
+                    self)
+        particle_deposition_functions(ptype, "particle_position",
+            "particle_mass", self)
+        standard_particle_fields(self, ptype)
+        # Now we check for any leftover particle fields
+        for field in sorted(self.field_list):
+            if field in self: continue
+            if not isinstance(field, tuple):
+                raise RuntimeError
+            if field[0] not in self.ds.particle_types:
+                continue
+            self.add_output_field(field, 
+                                  units = self.ds.field_units.get(field, ""),
+                                  particle_type = True)
+        self.setup_smoothed_fields(ptype, 
+                                   num_neighbors=num_neighbors,
+                                   ftype=ftype)
+
 def _dummy_position(field, data):
     return 0.5*np.ones_like(data['particle_position_x'])
 
@@ -119,7 +180,7 @@
 fluid_field_types = ['chombo', 'gas']
 particle_field_types = ['io', 'all']
 
-class ChomboPICFieldInfo2D(FieldInfoContainer):
+class ChomboPICFieldInfo2D(ChomboPICFieldInfo3D):
     known_other_fields = (
         ("density", (rho_units, ["density", "Density"], None)),
         ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
@@ -150,7 +211,7 @@
                            particle_type = True,
                            units = "code_length / code_time")
 
-class ChomboPICFieldInfo1D(FieldInfoContainer):
+class ChomboPICFieldInfo1D(ChomboPICFieldInfo3D):
     known_other_fields = (
         ("density", (rho_units, ["density", "Density"], None)),
         ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),


https://bitbucket.org/yt_analysis/yt/commits/9ca355125d5c/
Changeset:   9ca355125d5c
Branch:      yt
User:        atmyers
Date:        2014-08-24 01:57:47+00:00
Summary:     setting up code units correctly in Chombo
Affected #:  1 file

diff -r 6189126b61e591c97180d3ce07f22bb6f3e49ad4 -r 9ca355125d5c64e425a70943cb5839935cb6ce17 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -280,10 +280,13 @@
         self.parameters["EOSType"] = -1 # default
 
     def _set_code_unit_attributes(self):
-        self.length_unit = YTQuantity(1.0, "cm")
-        self.mass_unit = YTQuantity(1.0, "g")
-        self.time_unit = YTQuantity(1.0, "s")
-        self.velocity_unit = YTQuantity(1.0, "cm/s")
+        mylog.warning("Setting code length to be 1.0 cm")
+        mylog.warning("Setting code mass to be 1.0 g")
+        mylog.warning("Setting code time to be 1.0 s")
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.time_unit = self.quan(1.0, "s")
+        self.velocity_unit = self.length_unit / self.time_unit
 
     def _localize(self, f, default):
         if f is None:


https://bitbucket.org/yt_analysis/yt/commits/f48e49c378d4/
Changeset:   f48e49c378d4
Branch:      yt
User:        atmyers
Date:        2014-08-24 04:17:38+00:00
Summary:     removing code_length from skip_output_units
Affected #:  1 file

diff -r 9ca355125d5c64e425a70943cb5839935cb6ce17 -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -113,7 +113,7 @@
     )
 
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
-        skip_output_units = ("code_length",)
+        skip_output_units = () #("code_length",)
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):
             units = self.ds.field_units.get((ptype, f), units)
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \


https://bitbucket.org/yt_analysis/yt/commits/db8f8f09a001/
Changeset:   db8f8f09a001
Branch:      yt
User:        atmyers
Date:        2014-08-24 04:19:40+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  20 files

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -39,6 +39,13 @@
         padding-top: 10px;
         padding-bottom: 10px;
     }
+    /* since 3.1.0 */
+    .navbar-collapse.collapse.in { 
+        display: block!important;
+    }
+    .collapsing {
+        overflow: hidden!important;
+    }
 }
 
 /* 

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:f3e6416e4807e008a016ad8c7dfc8e78cab0d7519498458660554a4c88549c23"
+  "signature": "sha256:5a1547973517987ff047f1b2405277a0e98392e8fd5ffe04521cb2dc372d32d3"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -83,7 +83,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# Build a transfer function that is a multivariate gaussian in Density\n",
+      "# Build a transfer function that is a multivariate gaussian in temperature\n",
       "tfh = yt.TransferFunctionHelper(ds)\n",
       "tfh.set_field('temperature')\n",
       "tfh.set_log(True)\n",
@@ -180,4 +180,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -6,6 +6,7 @@
     config.make_config_py()  # installs __config__.py
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("cosmological_observation")
+    config.add_subpackage("halo_analysis")
     config.add_subpackage("halo_finding")
     config.add_subpackage("halo_mass_function")
     config.add_subpackage("level_sets")

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -447,8 +447,14 @@
 
     @contextmanager
     def _field_parameter_state(self, field_parameters):
+        # What we're doing here is making a copy of the incoming field
+        # parameters, and then updating it with our own.  This means that we'll
+        # be using our own center, if set, rather than the supplied one.  But
+        # it also means that any additionally set values can override it.
         old_field_parameters = self.field_parameters
-        self.field_parameters = field_parameters
+        new_field_parameters = field_parameters.copy()
+        new_field_parameters.update(old_field_parameters)
+        self.field_parameters = new_field_parameters
         yield
         self.field_parameters = old_field_parameters
 

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -517,15 +517,15 @@
     Parameters
     ----------
     center : array_like 
-        coordinate to which the normal, radius, and height all reference; in
-        the center of one of the bases of the cylinder
+        coordinate to which the normal, radius, and height all reference
     normal : array_like
         the normal vector defining the direction of lengthwise part of the 
         cylinder
     radius : float
         the radius of the cylinder
     height : float
-        the height of the lengthwise part of the cylinder
+        the distance from the midplane of the cylinder to the top and 
+        bottom planes
     fields : array of fields, optional
         any fields to be pre-loaded in the cylinder object
     ds: Dataset, optional
@@ -543,14 +543,15 @@
     >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc'))
     """
     _type_name = "disk"
-    _con_args = ('center', '_norm_vec', '_radius', '_height')
+    _con_args = ('center', '_norm_vec', 'radius', 'height')
     def __init__(self, center, normal, radius, height, fields=None,
                  ds=None, **kwargs):
         YTSelectionContainer3D.__init__(self, center, fields, ds, **kwargs)
         self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
         self.set_field_parameter("normal", self._norm_vec)
-        self._height = fix_length(height, self.ds)
-        self._radius = fix_length(radius, self.ds)
+        self.set_field_parameter("center", self.center)
+        self.height = fix_length(height, self.ds)
+        self.radius = fix_length(radius, self.ds)
         self._d = -1.0 * np.dot(self._norm_vec, self.center)
 
 class YTRegionBase(YTSelectionContainer3D):
@@ -575,7 +576,7 @@
     _con_args = ('center', 'left_edge', 'right_edge')
     def __init__(self, center, left_edge, right_edge, fields = None,
                  ds = None, **kwargs):
-        YTSelectionContainer3D.__init__(self, center, fields, ds, **kwargs)
+        YTSelectionContainer3D.__init__(self, center, ds, **kwargs)
         if not isinstance(left_edge, YTArray):
             self.left_edge = self.ds.arr(left_edge, 'code_length')
         else:
@@ -615,7 +616,7 @@
     >>> import yt
     >>> ds = yt.load("RedshiftOutput0005")
     >>> c = [0.5,0.5,0.5]
-    >>> sphere = ds.sphere(c,1.*ds['kpc'])
+    >>> sphere = ds.sphere(c, (1., "kpc"))
     """
     _type_name = "sphere"
     _con_args = ('center', 'radius')
@@ -627,6 +628,7 @@
             raise YTSphereTooSmall(ds, radius.in_units("code_length"),
                                    self.index.get_smallest_dx().in_units("code_length"))
         self.set_field_parameter('radius',radius)
+        self.set_field_parameter("center", self.center)
         self.radius = radius
 
 class YTEllipsoidBase(YTSelectionContainer3D):

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -572,7 +572,7 @@
         return out
 
     # Now all the object related stuff
-    def all_data(self, find_max=False):
+    def all_data(self, find_max=False, **kwargs):
         """
         all_data is a wrapper to the Region object for creating a region
         which covers the entire simulation domain.
@@ -580,7 +580,7 @@
         if find_max: c = self.find_max("density")[1]
         else: c = (self.domain_right_edge + self.domain_left_edge)/2.0
         return self.region(c,
-            self.domain_left_edge, self.domain_right_edge)
+            self.domain_left_edge, self.domain_right_edge, **kwargs)
 
     def box(self, left_edge, right_edge, **kwargs):
         """

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -82,9 +82,9 @@
             raise NeedsParameter("omega_baryon")
         co = data.ds.cosmology
         # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
-        # mean density(z) ~ omega_matter * (1 + z)^3
+        # mean matter density(z) ~ omega_matter * (1 + z)^3
         return data[ftype, "density"] / omega_baryon / co.critical_density(0.0) / \
-          (1.0 + data.ds.hubble_constant)**3
+          (1.0 + data.ds.current_redshift)**3
 
     registry.add_field((ftype, "baryon_overdensity"),
                        function=_baryon_overdensity,
@@ -99,9 +99,9 @@
         co = data.ds.cosmology
         # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
         # mean density(z) ~ omega_matter * (1 + z)^3
-        return data[ftype, "density"] / data.ds.omega_matter / \
+        return data[ftype, "matter_density"] / data.ds.omega_matter / \
           co.critical_density(0.0) / \
-          (1.0 + data.ds.hubble_constant)**3
+          (1.0 + data.ds.current_redshift)**3
 
     registry.add_field((ftype, "matter_overdensity"),
                        function=_matter_overdensity,
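
The two hunks above fix the mean-density scaling, which depends on redshift
rather than on the Hubble constant. The quantities computed are

    baryon_overdensity = rho / (omega_baryon * rho_crit(z=0) * (1 + z)**3)
    matter_overdensity = rho_matter / (omega_matter * rho_crit(z=0) * (1 + z)**3)

since the comoving mean density of each component at redshift z is its
present-day value times (1 + z)**3.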

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -15,6 +15,9 @@
 
 import numpy as np
 
+from yt.utilities.lib.geometry_utils import \
+    obtain_rvec
+
 def get_radius(data, field_prefix):
     center = data.get_field_parameter("center").in_units("cm")
     DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).in_units("cm")
@@ -43,3 +46,17 @@
     # Alias it, just for clarity.
     radius = radius2
     return radius
+
+def get_periodic_rvec(data):
+    coords = obtain_rvec(data)
+    if sum(data.ds.periodicity) == 0: return coords
+    le = data.ds.domain_left_edge.in_units("code_length").d
+    dw = data.ds.domain_width.in_units("code_length").d
+    for i in range(coords.shape[0]):
+        if not data.ds.periodicity[i]: continue
+        coords[i, ...] -= le[i]
+        coords[i, ...] = np.min([np.abs(np.mod(coords[i, ...],  dw[i])),
+                                 np.abs(np.mod(coords[i, ...], -dw[i]))],
+                                 axis=0)
+        coords[i, ...] += le[i]
+    return coords
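
get_periodic_rvec applies the minimum-image rule along each periodic axis: a
displacement is replaced by the smaller of its two wrapped images. The core of
the rule in one dimension (domain width dw = 1.0, illustrative values):

    import numpy as np

    dw = 1.0
    d = np.array([0.1, 0.6, 0.9])  # displacements measured from the left edge
    wrapped = np.min([np.abs(np.mod(d,  dw)),
                      np.abs(np.mod(d, -dw))], axis=0)
    print(wrapped)  # [0.1 0.4 0.1]: 0.9 is closer through the periodic boundary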

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -22,6 +22,7 @@
     ValidateSpatial
 
 from .field_functions import \
+     get_periodic_rvec, \
      get_radius
 
 from .field_plugin_registry import \
@@ -39,9 +40,6 @@
     get_sph_theta, get_sph_phi, \
     periodic_dist, euclidean_dist
 
-from yt.utilities.lib.geometry_utils import \
-    obtain_rvec
-
 @register_field_plugin
 def setup_geometric_fields(registry, ftype = "gas", slice_info = None):
 
@@ -92,12 +90,8 @@
 
     ### spherical coordinates: r (radius)
     def _spherical_r(field, data):
-        center = data.get_field_parameter("center")
-        coords = data.ds.arr(obtain_rvec(data), "code_length")
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
-        return get_sph_r(coords).in_cgs()
+        coords = get_periodic_rvec(data)
+        return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
 
     registry.add_field(("index", "spherical_r"),
              function=_spherical_r,
@@ -106,27 +100,19 @@
 
     ### spherical coordinates: theta (angle with respect to normal)
     def _spherical_theta(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_sph_theta(coords, normal)
 
     registry.add_field(("index", "spherical_theta"),
              function=_spherical_theta,
              validators=[ValidateParameter("center"),
-             ValidateParameter("normal")])
+                         ValidateParameter("normal")])
 
     ### spherical coordinates: phi (angle in the plane perpendicular to the normal)
     def _spherical_phi(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_sph_phi(coords, normal)
 
     registry.add_field(("index", "spherical_phi"),
@@ -136,29 +122,21 @@
 
     ### cylindrical coordinates: R (radius in the cylinder's plane)
     def _cylindrical_r(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
 
     registry.add_field(("index", "cylindrical_r"),
              function=_cylindrical_r,
              validators=[ValidateParameter("center"),
-                        ValidateParameter("normal")],
+                         ValidateParameter("normal")],
              units="cm")
 
     ### cylindrical coordinates: z (height above the cylinder's plane)
     def _cylindrical_z(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = data.ds.arr(obtain_rvec(data), "code_length")
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
-        return get_cyl_z(coords, normal).in_cgs()
+        coords = get_periodic_rvec(data)
+        return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
 
     registry.add_field(("index", "cylindrical_z"),
              function=_cylindrical_z,
@@ -168,12 +146,8 @@
 
     ### cylindrical coordinates: theta (angle in the cylinder's plane)
     def _cylindrical_theta(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_cyl_theta(coords, normal)
 
     registry.add_field(("index", "cylindrical_theta"),

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -116,7 +116,11 @@
                                 "bulk_%s" % basename)
         theta = data['index', 'spherical_theta']
         phi   = data['index', 'spherical_phi']
-        return get_sph_r_component(vectors, theta, phi, normal)
+        rv = get_sph_r_component(vectors, theta, phi, normal)
+        # Now, anywhere that radius is in fact zero, we want to zero out our
+        # return values.
+        rv[np.isnan(theta)] = 0.0
+        return rv
     def _radial_absolute(field, data):
         return np.abs(data[ftype, "radial_%s" % basename])
 

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -566,8 +566,10 @@
         # Skip timesteps per level
         header_file.readline()
         self._header_mesh_start = header_file.tell()
-        header_file.next()
-        next_line = header_file.next()
+        # Skip the cell size information per level - we'll get this later
+        for i in range(self._max_level+1): header_file.readline()
+        # Get the geometry
+        next_line = header_file.readline()
         if len(next_line.split()) == 1:
             coordinate_type = int(next_line)
         else:
@@ -780,7 +782,7 @@
                 line = f.next()
             # get the runtime parameters
             for line in f:
-                p, v = (_.strip() for _ in line[4:].split("="))
+                p, v = (_.strip() for _ in line[4:].split("=",1))
                 if len(v) == 0:
                     self.parameters[p] = ""
                 else:

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -286,7 +286,7 @@
             active_particles = True
             nap = dict((ap_type, []) for ap_type in 
                 params["Physics"]["ActiveParticles"]["ActiveParticlesEnabled"])
-        elif version > 2.0:
+        elif version == 2.2:
             active_particles = True
             nap = {}
             for type in self.parameters.get("AppendActiveParticleType", []):

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -219,6 +219,7 @@
             self.add_output_field(
                 ("enzo", te_name),
                 units = "code_velocity**2")
+            self.alias(("gas", "total_energy"), ("enzo", te_name))
             def _tot_minus_kin(field, data):
                 return data[te_name] - 0.5*(
                     data["x-velocity"]**2.0
@@ -226,6 +227,7 @@
                     + data["z-velocity"]**2.0 )
             self.add_field(
                 ("gas", "thermal_energy"),
+                function = _tot_minus_kin,
                 units = "erg/g")
 
     def setup_particle_fields(self, ptype):
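
The hunk above supplies the function argument that was previously missing, so the
derived thermal energy field actually evaluates

    thermal_energy = total_energy - 0.5 * (vx**2 + vy**2 + vz**2)

in erg/g, i.e. specific total energy minus specific kinetic energy.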

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -44,7 +44,10 @@
             if not hasattr(v, "shape") or v.dtype == "O":
                 continue
             elif len(v.dims) == 1:
-                if add_io: fields.append( ("io", str(name)) )
+                if grid.ds.dimensionality == 1:
+                    fields.append( ("enzo", str(name)) )
+                elif add_io:
+                    fields.append( ("io", str(name)) )
             else:
                 fields.append( ("enzo", str(name)) )
         f.close()
@@ -238,12 +241,14 @@
         fields = []
         add_io = "io" in grid.ds.particle_types
         for name, v in self.grids_in_memory[grid.id].items():
-
             # NOTE: This won't work with 1D datasets or references.
             if not hasattr(v, "shape") or v.dtype == "O":
                 continue
             elif v.ndim == 1:
-                if add_io: fields.append( ("io", str(name)) )
+                if grid.ds.dimensionality == 1:
+                    fields.append( ("enzo", str(name)) )
+                elif add_io:
+                    fields.append( ("io", str(name)) )
             else:
                 fields.append( ("enzo", str(name)) )
         return fields

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -442,7 +442,7 @@
                         if child_mask[i, j, k] == 1 or this_level == 1:
                             mask[i, j, k] = self.select_cell(pos, dds)
                             total += mask[i, j, k]
-                        pos[2] += dds[1]
+                        pos[2] += dds[2]
                     pos[1] += dds[1]
                 pos[0] += dds[0]
         if total == 0: return None
@@ -785,9 +785,9 @@
         for i in range(3):
             self.norm_vec[i] = dobj._norm_vec[i]
             self.center[i] = _ensure_code(dobj.center[i])
-        self.radius = _ensure_code(dobj._radius)
+        self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
-        self.height = _ensure_code(dobj._height)
+        self.height = _ensure_code(dobj.height)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -799,12 +799,17 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef int select_point(self, np.float64_t pos[3]) nogil:
-        cdef np.float64_t h, d, r2, temp
-        cdef int i
+        cdef np.float64_t h, d, r2, temp, spos
+        cdef int i, j, k
         h = d = 0
-        for i in range(3):
-            temp = self.difference(pos[i], self.center[i], i)
-            h += temp * self.norm_vec[i]
+        for ax in range(3):
+            temp = 1e30
+            for i in range(3):
+                if self.periodicity[ax] == 0 and i != 1: continue
+                spos = pos[ax] + (i-1)*self.domain_width[ax]
+                if fabs(spos - self.center[ax]) < fabs(temp):
+                    temp = spos - self.center[ax]
+            h += temp * self.norm_vec[ax]
             d += temp*temp
         r2 = (d - h*h)
         if fabs(h) <= self.height and r2 <= self.radius2: return 1
@@ -831,6 +836,8 @@
     @cython.cdivision(True)
     cdef int select_bbox(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil:
+        # Until we can get our OBB/OBB intersection correct, disable this.
+        return 1
         cdef np.float64_t *arr[2]
         cdef np.float64_t pos[3], H, D, R2, temp
         cdef int i, j, k, n

diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -369,32 +369,34 @@
 
 class GridBoundaryCallback(PlotCallback):
     """
-    annotate_grids(alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
-                 min_level=None, max_level=None, cmap='B-W LINEAR_r', edgecolors=None,
-                 linewidth=1.0):
+    annotate_grids(alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False,
+                   periodic=True, min_level=None, max_level=None,
+                   cmap='B-W LINEAR_r', edgecolors=None, linewidth=1.0):
 
-    Draws grids on an existing PlotWindow object.
-    Adds grid boundaries to a plot, optionally with alpha-blending. By default, 
-    colors different levels of grids with different colors going from white to
-    black, but you can change to any arbitrary colormap with cmap keyword, to all black
-    grid edges for all levels with cmap=None and edgecolors=None, or to an arbitrary single
-    color for grid edges with edgecolors='YourChosenColor' defined in any of the standard ways
-    (e.g., edgecolors='white', edgecolors='r', edgecolors='#00FFFF', or edgecolor='0.3', where
-    the last is a float in 0-1 scale indicating gray).
-    Note that setting edgecolors overrides cmap if you have both set to non-None values.
-    Cutoff for display is at min_pix wide. draw_ids puts the grid id in the corner of the grid.
-    (Not so great in projections...).  One can set min and maximum level of
-    grids to display, and can change the linewidth of the displayed grids.
+    Draws grids on an existing PlotWindow object.  Adds grid boundaries to a
+    plot, optionally with alpha-blending. By default, colors different levels of
+    grids with different colors going from white to black, but you can change to
+    any arbitrary colormap with cmap keyword, to all black grid edges for all
+    levels with cmap=None and edgecolors=None, or to an arbitrary single color
+    for grid edges with edgecolors='YourChosenColor' defined in any of the
+    standard ways (e.g., edgecolors='white', edgecolors='r',
+    edgecolors='#00FFFF', or edgecolor='0.3', where the last is a float in 0-1
+    scale indicating gray).  Note that setting edgecolors overrides cmap if you
+    have both set to non-None values.  Cutoff for display is at min_pix
+    wide. draw_ids puts the grid id in the corner of the grid.  (Not so great in
+    projections...).  One can set min and maximum level of grids to display, and
+    can change the linewidth of the displayed grids.
     """
     _type_name = "grids"
-    def __init__(self, alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
-                 min_level=None, max_level=None, cmap='B-W LINEAR_r', edgecolors=None,
-                 linewidth=1.0):
+
+    def __init__(self, alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False,
+                 periodic=True, min_level=None, max_level=None,
+                 cmap='B-W LINEAR_r', edgecolors=None, linewidth=1.0):
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
         self.min_pix_ids = min_pix_ids
-        self.draw_ids = draw_ids # put grid numbers in the corner.
+        self.draw_ids = draw_ids  # put grid numbers in the corner.
         self.periodic = periodic
         self.min_level = min_level
         self.max_level = max_level
@@ -437,20 +439,22 @@
             left_edge_y = np.array((GLE[:,py_index]+pyo-y0)*dy) + yy0
             right_edge_x = np.array((GRE[:,px_index]+pxo-x0)*dx) + xx0
             right_edge_y = np.array((GRE[:,py_index]+pyo-y0)*dy) + yy0
-            visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
-                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix ) & \
-                       ( levels >= min_level) & \
-                       ( levels <= max_level)
+            xwidth = xpix * (right_edge_x - left_edge_x) / (xx1 - xx0)
+            ywidth = ypix * (right_edge_y - left_edge_y) / (yy1 - yy0)
+            visible = np.logical_and(
+                np.logical_and(xwidth > self.min_pix, ywidth > self.min_pix),
+                np.logical_and(levels >= min_level, levels <= max_level))
 
             # Grids can either be set by edgecolors OR a colormap.
             if self.edgecolors is not None:
-                edgecolors = colorConverter.to_rgba(self.edgecolors, alpha=self.alpha)
+                edgecolors = colorConverter.to_rgba(
+                    self.edgecolors, alpha=self.alpha)
             else:  # use colormap if not explicity overridden by edgecolors
                 if self.cmap is not None:
-                    sample_levels = levels[(levels <= max_level) & (levels >= min_level)]
-                    color_bounds = [0,plot.data.pf.h.max_level]
-                    edgecolors = apply_colormap(sample_levels*1.0, color_bounds=color_bounds,
-                                                cmap_name=self.cmap)[0,:,:]*1.0/255.
+                    color_bounds = [0,plot.data.ds.index.max_level]
+                    edgecolors = apply_colormap(
+                        levels[visible]*1.0, color_bounds=color_bounds,
+                        cmap_name=self.cmap)[0,:,:]*1.0/255.
                     edgecolors[:,3] = self.alpha
                 else:
                     edgecolors = (0.0,0.0,0.0,self.alpha)
@@ -461,13 +465,16 @@
                  (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
             grid_collection = matplotlib.collections.PolyCollection(
-                verts, facecolors="none", edgecolors=edgecolors, linewidth=self.linewidth)
+                verts, facecolors="none", edgecolors=edgecolors,
+                linewidth=self.linewidth)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
 
             if self.draw_ids:
-                visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
-                               ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
+                visible_ids = np.logical_and(
+                    np.logical_and(xwidth > self.min_pix_ids,
+                                   ywidth > self.min_pix_ids),
+                    np.logical_and(levels >= min_level, levels <= max_level))
                 active_ids = np.unique(plot.data['grid_indices'])
                 for i in np.where(visible_ids)[0]:
                     plot._axes.text(

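The callback now computes the on-screen pixel widths once and reuses them for both the grid-edge mask and the grid-id mask, and it samples the colormap from levels[visible] so there is exactly one color per drawn polygon. A self-contained sketch of the visibility test:

    import numpy as np

    def grid_visibility(xwidth, ywidth, levels, min_pix, min_level, max_level):
        # A grid is drawn only if it is wide enough on screen (in
        # pixels) and within the requested refinement-level window.
        return ((xwidth > min_pix) & (ywidth > min_pix) &
                (levels >= min_level) & (levels <= max_level))

Sampling edge colors from levels[visible], rather than from a separately filtered array as before, keeps the colors aligned with the verts handed to the PolyCollection.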
diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -154,7 +154,9 @@
             xmi, xma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
         else:
             xfunc = np.linspace
-            xmi, xma = self.bounds
+            # Need to strip units off of the bounds to avoid a recursion error
+            # in matplotlib 1.3.1
+            xmi, xma = [np.float64(b) for b in self.bounds]
 
         x = xfunc(xmi, xma, tf.nbins)
         y = tf.funcs[3].y
@@ -166,7 +168,7 @@
         canvas = FigureCanvasAgg(fig)
         ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])
         ax.bar(x, tf.funcs[3].y, w, edgecolor=[0.0, 0.0, 0.0, 0.0],
-               log=True, color=colors, bottom=[0])
+               log=self.log, color=colors, bottom=[0])
 
         if profile_field is not None:
             try:
@@ -177,10 +179,12 @@
             if profile_field not in prof.keys():
                 prof.add_fields([profile_field], fractional=False,
                                 weight=profile_weight)
-            ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
-                    prof[profile_field].max(), color='w', linewidth=3)
-            ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
-                    prof[profile_field].max(), color='k')
+            # Strip units, if any, for matplotlib 1.3.1
+            xplot = np.array(prof[self.field])
+            yplot = np.array(prof[profile_field]*tf.funcs[3].y.max() /
+                             prof[profile_field].max())
+            ax.plot(xplot, yplot, color='w', linewidth=3)
+            ax.plot(xplot, yplot, color='k')
 
         ax.set_xscale({True: 'log', False: 'linear'}[self.log])
         ax.set_xlim(x.min(), x.max())
@@ -200,7 +204,7 @@
 
     def setup_profile(self, profile_field=None, profile_weight=None):
         if profile_field is None:
-            profile_field = 'CellVolume'
+            profile_field = 'cell_volume'
         prof = BinnedProfile1D(self.ds.all_data(), 128, self.field,
                                self.bounds[0], self.bounds[1],
                                log_space=self.log,

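Several of these hunks apply the same workaround: cast unit-carrying values to plain floats or arrays before handing them to matplotlib 1.3.1, which otherwise hits a recursion error on array-like objects with units. A sketch of the pattern, assuming values that convert like yt's YTArray:

    import numpy as np

    def strip_units(values):
        # Return a plain float64 scalar/array view of possibly
        # unit-carrying input, safe to pass to matplotlib 1.3.1.
        arr = np.asarray(values, dtype=np.float64)
        return float(arr) if arr.ndim == 0 else arr

This mirrors both the bounds handling (xmi, xma = [np.float64(b) for b in self.bounds]) and the profile plotting (xplot = np.array(prof[self.field])) above.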
diff -r f48e49c378d44f9ae1b5036174d36f35fcbb78f6 -r db8f8f09a001e413e33b9465324a8f9d978cd141 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -48,6 +48,8 @@
     def __init__(self, x_bounds, nbins=256):
         self.pass_through = 0
         self.nbins = nbins
+        # Strip units off of x_bounds, if any
+        x_bounds = [np.float64(xb) for xb in x_bounds]
         self.x_bounds = x_bounds
         self.x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
         self.y = np.zeros(nbins, dtype='float64')
@@ -353,6 +355,8 @@
     """
     def __init__(self, x_bounds, nbins=256, grey_opacity = False):
         MultiVariateTransferFunction.__init__(self)
+        # Strip units off of x_bounds, if any
+        x_bounds = [np.float64(xb) for xb in x_bounds]
         self.x_bounds = x_bounds
         self.nbins = nbins
         # This is all compatibility and convenience.
@@ -633,6 +637,7 @@
         >>> tf = ColorTransferFunction( (-10.0, -5.0) )
         >>> tf.sample_colormap(-7.0, 0.01, colormap='algae')
         """
+        v = np.float64(v)
         if col_bounds is None:
             rel = (v - self.x_bounds[0])/(self.x_bounds[1] - self.x_bounds[0])
         else:
@@ -680,7 +685,8 @@
         >>> tf.map_to_colormap(-6.0, -5.0, scale=10.0, colormap='algae',
         ...                    scale_func = linramp)
         """
-
+        mi = np.float64(mi)
+        ma = np.float64(ma)
         rel0 = int(self.nbins*(mi - self.x_bounds[0])/(self.x_bounds[1] -
                                                        self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
@@ -800,6 +806,8 @@
         if n_fields > 3:
             raise NotImplementedError
         MultiVariateTransferFunction.__init__(self)
+        # Strip units off of x_bounds, if any
+        x_bounds = [np.float64(xb) for xb in x_bounds]
         self.x_bounds = x_bounds
         self.nbins = 2
         self.linear_mapping = TransferFunction(x_bounds, 2)

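The new float casts feed the bin-index arithmetic in sample_colormap and map_to_colormap, which linearly map a value range onto transfer-function bins. The index math, sketched:

    import numpy as np

    def value_to_bin(v, x_bounds, nbins):
        # Linear map of a value onto a bin index, matching rel0/rel1
        # in map_to_colormap; casting to float64 first keeps
        # unit-carrying types from leaking into int().
        v = np.float64(v)
        return int(nbins * (v - x_bounds[0]) / (x_bounds[1] - x_bounds[0]))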

https://bitbucket.org/yt_analysis/yt/commits/de491429dd33/
Changeset:   de491429dd33
Branch:      yt
User:        atmyers
Date:        2014-08-27 03:32:02+00:00
Summary:     cleaning up imports
Affected #:  1 file

diff -r db8f8f09a001e413e33b9465324a8f9d978cd141 -r de491429dd3346de7dbfbdc2159278ce3283ceb1 yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -16,7 +16,6 @@
 
 import __builtin__
 import base64
-import os
 import types
 
 from functools import wraps
@@ -25,12 +24,6 @@
 import numpy as np
 import cStringIO
 
-from .base_plot_types import ImagePlotMPL
-from .plot_container import \
-    ImagePlotContainer, \
-    log_transform, linear_transform
-from yt.data_objects.profiles import \
-    create_profile
 from yt.utilities.exceptions import \
     YTNotInsideNotebook
 from yt.utilities.logger import ytLogger as mylog
@@ -40,78 +33,18 @@
     get_image_suffix, \
     get_ipython_api_version
 from yt.units.unit_object import Unit
-
-def get_canvas(name):
-    suffix = get_image_suffix(name)
-    
-    if suffix == '':
-        suffix = '.png'
-    if suffix == ".png":
-        canvas_cls = mpl.FigureCanvasAgg
-    elif suffix == ".pdf":
-        canvas_cls = mpl.FigureCanvasPdf
-    elif suffix in (".eps", ".ps"):
-        canvas_cls = mpl.FigureCanvasPS
-    else:
-        mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
-        canvas_cls = mpl.FigureCanvasAgg
-    return canvas_cls
-
-def invalidate_plot(f):
-    @wraps(f)
-    def newfunc(*args, **kwargs):
-        rv = f(*args, **kwargs)
-        args[0]._plot_valid = False
-        args[0]._setup_plots()
-        return rv
-    return newfunc
-
-class FigureContainer(dict):
-    def __init__(self):
-        super(FigureContainer, self).__init__()
-
-    def __missing__(self, key):
-        figure = mpl.matplotlib.figure.Figure((10, 8))
-        self[key] = figure
-        return self[key]
-
-class AxesContainer(dict):
-    def __init__(self, fig_container):
-        self.fig_container = fig_container
-        self.ylim = {}
-        super(AxesContainer, self).__init__()
-
-    def __missing__(self, key):
-        figure = self.fig_container[key]
-        self[key] = figure.add_subplot(111)
-        return self[key]
-
-    def __setitem__(self, key, value):
-        super(AxesContainer, self).__setitem__(key, value)
-        self.ylim[key] = (None, None)
-
-def sanitize_label(label, nprofiles):
-    label = ensure_list(label)
-    
-    if len(label) == 1:
-        label = label * nprofiles
-    
-    if len(label) != nprofiles:
-        raise RuntimeError("Number of labels must match number of profiles")
-
-    for l in label:
-        if l is not None and not isinstance(l, basestring):
-            raise RuntimeError("All labels must be None or a string")
-
-    return label
+from .profile_plotter import \
+    get_canvas, \
+    invalidate_plot, \
+    sanitize_label
 
 class ParticlePlot(object):
     r"""
     Create a particle scatter plot from a data source.
 
     Given a data object (all_data, region, sphere, etc.), an x field, 
-    and a y field (or fields), this will a scatter plot with one marker
-    for each particle.
+    and a y field (both of particle type), this will create a scatter
+    plot with one marker for each particle.
 
     Parameters
     ----------
@@ -124,7 +57,7 @@
         The field to plot on the y-axis.
     plot_spec : dict or list of dicts
         A dictionary or list of dictionaries containing plot keyword 
-        arguments.  For example, dict('c'='r', marker='.').
+        arguments.  This will be passed For example, dict('c'='r', 'marker'='.').
         Default: dict('c'='b', 'marker'='.', 'linestyle'='None', 'markersize'=8)
 
     Examples
@@ -133,10 +66,10 @@
     >>> import yt
     >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
     >>> ad = ds.all_data()
-    >>> plot = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
+    >>> plot = yt.ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x')
     >>> plot.save()
 
-    Use set_line_property to change line properties of one or all profiles.
+    Use set_line_property to change line properties.
     
     """
     x_log = None


https://bitbucket.org/yt_analysis/yt/commits/1f59d0e60045/
Changeset:   1f59d0e60045
Branch:      yt
User:        atmyers
Date:        2014-08-27 03:33:36+00:00
Summary:     fixing a syntax error in the examples
Affected #:  1 file

diff -r de491429dd3346de7dbfbdc2159278ce3283ceb1 -r 1f59d0e60045b047b164ec04800e1f6a1c44770e yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -57,8 +57,8 @@
         The field to plot on the y-axis.
     plot_spec : dict or list of dicts
         A dictionary or list of dictionaries containing plot keyword 
-        arguments.  This will be passed For example, dict('c'='r', 'marker'='.').
-        Default: dict('c'='b', 'marker'='.', 'linestyle'='None', 'markersize'=8)
+        arguments.  This will be passed For example, dict(c='r', marker='.').
+        Default: dict(c='b', marker='.', linestyle='None', markersize=8)
 
     Examples
     --------


https://bitbucket.org/yt_analysis/yt/commits/a3b387b374d8/
Changeset:   a3b387b374d8
Branch:      yt
User:        atmyers
Date:        2014-08-27 04:25:22+00:00
Summary:     adding support for labels
Affected #:  1 file

diff -r 1f59d0e60045b047b164ec04800e1f6a1c44770e -r a3b387b374d8a691c2e6e2256f40738f361dd38b yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -82,7 +82,7 @@
     _plot_valid = False
 
     def __init__(self, data_source, x_field, y_field,
-                 plot_spec=None):
+                 label=None, plot_spec=None):
 
         if plot_spec is None:
             plot_spec = {'c':'b', 'marker':'.', 'linestyle':'None', 'markersize':8}
@@ -90,6 +90,7 @@
         self.data_source = data_source
         self.x_field = x_field
         self.y_field = y_field
+        self.label = sanitize_label(label, 1)
         self.plot_spec = plot_spec
 
         self.x_data = self.data_source[x_field]
@@ -176,7 +177,7 @@
     def _setup_plots(self):
         self.axis.cla()
         self.axis.plot(np.array(self.x_data), np.array(self.y_data),
-                       **self.plot_spec)
+                       label=self.label, **self.plot_spec)
 
         xscale, yscale = self._get_axis_log()
         xtitle, ytitle = self._get_axis_titles()
@@ -190,6 +191,9 @@
         self.axis.set_xlim(*self.x_lim)
         self.axis.set_ylim(*self.y_lim)
 
+        if any(self.label):
+            self.axis.legend(loc="best")
+
         self._plot_valid = True
 
     @invalidate_plot

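With the new keyword, a legend is drawn whenever any label is set. A usage sketch, using the same sample dataset as the docstring examples:

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    ad = ds.all_data()

    # Passing a label triggers axis.legend(loc="best") in _setup_plots.
    plot = yt.ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x',
                           label='all particles')
    plot.save()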

https://bitbucket.org/yt_analysis/yt/commits/e63fed03c276/
Changeset:   e63fed03c276
Branch:      yt
User:        atmyers
Date:        2014-08-27 21:47:35+00:00
Summary:     removing extraneous text from docstrings
Affected #:  3 files

diff -r a3b387b374d8a691c2e6e2256f40738f361dd38b -r e63fed03c276c18c986bc1ad628a04d4908c0a21 yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -57,7 +57,8 @@
         The field to plot on the y-axis.
     plot_spec : dict or list of dicts
         A dictionary or list of dictionaries containing plot keyword 
-        arguments.  This will be passed For example, dict(c='r', marker='.').
+        arguments.  This will be passed to pyplot.plot. 
+        For example, dict(c='r', marker='.').
         Default: dict(c='b', marker='.', linestyle='None', markersize=8)
 
     Examples
@@ -98,7 +99,7 @@
         
         self.figure = mpl.matplotlib.figure.Figure((10, 8))
         self.axis = self.figure.add_subplot(111)
-        self._setup_plots()
+        self._setup_plot()
 
     def save(self, name=None):
         r"""
@@ -111,7 +112,7 @@
 
          """
         if not self._plot_valid:
-            self._setup_plots()
+            self._setup_plot()
         if name is None:
             prefix = self.data_source.ds
             name = "%s.png" % prefix
@@ -133,8 +134,7 @@
         return fn
 
     def show(self):
-        r"""This will send any existing plots to the IPython notebook.
-        function name.
+        r"""This will send any the plot to the IPython notebook.
 
         If yt is being run from within an IPython session, and it is able to
         determine this, this function will send any existing plots to the
@@ -174,7 +174,7 @@
         ret += '<img src="data:image/png;base64,%s"><br>' % img
         return ret
 
-    def _setup_plots(self):
+    def _setup_plot(self):
         self.axis.cla()
         self.axis.plot(np.array(self.x_data), np.array(self.y_data),
                        label=self.label, **self.plot_spec)

diff -r a3b387b374d8a691c2e6e2256f40738f361dd38b -r e63fed03c276c18c986bc1ad628a04d4908c0a21 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -501,7 +501,6 @@
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.
-        function name.
 
         If yt is being run from within an IPython session, and it is able to
         determine this, this function will send any existing plots to the

diff -r a3b387b374d8a691c2e6e2256f40738f361dd38b -r e63fed03c276c18c986bc1ad628a04d4908c0a21 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -256,7 +256,6 @@
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.
-        function name.
 
         If yt is being run from within an IPython session, and it is able to
         determine this, this function will send any existing plots to the


https://bitbucket.org/yt_analysis/yt/commits/c6ef8495b889/
Changeset:   c6ef8495b889
Branch:      yt
User:        atmyers
Date:        2014-08-27 21:58:53+00:00
Summary:     fixing up docstrings
Affected #:  1 file

diff -r e63fed03c276c18c986bc1ad628a04d4908c0a21 -r c6ef8495b88927213f4ee6340b664a9985442c17 yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -137,7 +137,7 @@
         r"""This will send any the plot to the IPython notebook.
 
         If yt is being run from within an IPython session, and it is able to
-        determine this, this function will send any existing plots to the
+        determine this, this function will send the plot to the
         notebook for display.
 
         If yt can't determine if it's inside an IPython session, it will raise
@@ -148,7 +148,7 @@
 
         >>> import yt
         >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = ProfilePlot(ds.all_data(), 'density', 'temperature')
+        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_position_y')
         >>> pp.show()
 
         """
@@ -199,7 +199,7 @@
     @invalidate_plot
     def set_line_property(self, property, value):
         r"""
-        Set properties for one or all lines to be plotted.
+        Set properties for the line on the plot.
 
         Parameters
         ----------
@@ -211,11 +211,8 @@
         Examples
         --------
 
-        Change all the lines in a plot
-        plot.set_line_property("linestyle", "-")
+        plot.set_line_property("marker", "+")
 
-        Change a single line.
-        plot.set_line_property("linewidth", 4, index=0)
         
         """
         specs = self.plot_spec
@@ -228,8 +225,7 @@
 
         Parameters
         ----------
-        field : string
-            the field to set a transform
+
         log : boolean
             Log on/off.
         """
@@ -242,8 +238,7 @@
 
         Parameters
         ----------
-        field : string
-            the field to set a transform
+
         log : boolean
             Log on/off.
         """
@@ -268,7 +263,7 @@
         elif field == self.y_field:
             self.y_data.convert_to_units(unit)
         else:
-            raise KeyError("Field %s not in profile plot!" % (field))
+            raise KeyError("Field %s not in the plot!" % (field))
         return self
 
     @invalidate_plot


https://bitbucket.org/yt_analysis/yt/commits/b5403ca197e7/
Changeset:   b5403ca197e7
Branch:      yt
User:        atmyers
Date:        2014-08-27 22:26:28+00:00
Summary:     changing _setup_plot back to _setup_plots
Affected #:  1 file

diff -r c6ef8495b88927213f4ee6340b664a9985442c17 -r b5403ca197e73e17b07b5575424e1bdb657e0b10 yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ b/yt/visualization/particle_plotter.py
@@ -99,7 +99,7 @@
         
         self.figure = mpl.matplotlib.figure.Figure((10, 8))
         self.axis = self.figure.add_subplot(111)
-        self._setup_plot()
+        self._setup_plots()
 
     def save(self, name=None):
         r"""
@@ -112,7 +112,7 @@
 
          """
         if not self._plot_valid:
-            self._setup_plot()
+            self._setup_plots()
         if name is None:
             prefix = self.data_source.ds
             name = "%s.png" % prefix
@@ -174,7 +174,7 @@
         ret += '<img src="data:image/png;base64,%s"><br>' % img
         return ret
 
-    def _setup_plot(self):
+    def _setup_plots(self):
         self.axis.cla()
         self.axis.plot(np.array(self.x_data), np.array(self.y_data),
                        label=self.label, **self.plot_spec)


https://bitbucket.org/yt_analysis/yt/commits/06c705a62111/
Changeset:   06c705a62111
Branch:      yt
User:        atmyers
Date:        2014-08-28 23:29:44+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  8 files

diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -186,9 +186,20 @@
     this_f = getattr(frontends_module, frontend)
     field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
     dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
+
     if frontend == "sph":
         field_info_names = \
           ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    elif frontend == "boxlib":
+        field_info_names = []
+        for d in dataset_names:
+            if "Maestro" in d:  
+                field_info_names.append("MaestroFieldInfo")
+            elif "Castro" in d: 
+                field_info_names.append("CastroFieldInfo")
+            else: 
+                field_info_names.append("BoxlibFieldInfo")
+
     for dset_name, fi_name in zip(dataset_names, field_info_names):
         fi = getattr(this_f, fi_name)
         nfields = 0

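The helper discovers FieldInfo classes by string-matching names pulled from dir(); an equivalent table-driven form of the new boxlib branch (hypothetical, not part of the commit) makes the precedence explicit:

    def boxlib_field_info_name(dataset_name):
        # Maestro and Castro datasets must be matched before falling
        # through to the generic Boxlib field info.
        for marker, fi_name in (("Maestro", "MaestroFieldInfo"),
                                ("Castro", "CastroFieldInfo")):
            if marker in dataset_name:
                return fi_name
        return "BoxlibFieldInfo"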
diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -580,56 +580,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +651,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -932,7 +929,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY

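Each dependency bump above re-pins the tarball to a SHA512 digest written as "<hash>  <filename>". The install script's download helpers verify against these files; as an illustration (not the script's own code), the check amounts to:

    import hashlib

    def verify_sha512(path, expected_hex):
        # Hash the downloaded file in chunks and compare against the
        # pinned digest from the corresponding .sha512 file.
        h = hashlib.sha512()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest() == expected_hex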
diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -170,10 +170,16 @@
 Developing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-If you plan to develop yt on Windows, we recommend using the `MinGW
+If you plan to develop yt on Windows, it is necessary to use the `MinGW
 <http://www.mingw.org/>`_ gcc compiler that can be installed using the `Anaconda
-Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the
-syntax for the setup command is slightly different; you must type:
+Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. The libpython package must be
+installed from Anaconda as well. These can both be installed with a single command:
+
+.. code-block:: bash
+
+  $ conda install libpython mingw
+
+Additionally, the syntax for the setup command is slightly different; you must type:
 
 .. code-block:: bash
 

diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -138,14 +138,14 @@
 
 .. _loading-orion-data:
 
-Boxlib Data
+BoxLib Data
 -----------
 
-yt has been tested with Boxlib data generated by Orion, Nyx, Maestro and
+yt has been tested with BoxLib data generated by Orion, Nyx, Maestro and
 Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
 Malone, Matthew Turk, and Mike Zingale.
 
-To load a Boxlib dataset, you can use the ``yt.load`` command on
+To load a BoxLib dataset, you can use the ``yt.load`` command on
 the plotfile directory name.  In general, you must also have the
 ``inputs`` file in the base directory, but Maestro and Castro will get
 all the necessary parameter information from the ``job_info`` file in
@@ -178,6 +178,18 @@
 For Maestro and Castro, you would not need the ``inputs`` file, and you 
 would have a ``job_info`` file in the plotfile directory.
 
+.. rubric:: Caveats
+
+* yt does not read the Maestro base state (although you can have Maestro
+  map it to a full Cartesian state variable before writing the plotfile
+  to get around this).  E-mail the dev list if you need this support.
+* yt does not know about particles in Maestro.
+* For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
+  depending on the value of the ``use_tfromp`` runtime parameter.
+* For Maestro, some velocity fields like ``velocity_magnitude`` or 
+  ``mach_number`` will always use the on-disk value, and not have yt 
+  derive it, due to the complex interplay of the base state velocity.
+
 .. _loading-enzo-data:
 
 Enzo Data

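To make the renamed section concrete: loading a BoxLib plotfile is a single yt.load call on the plotfile directory (the path below is a hypothetical example):

    import yt

    # For Orion-style output the `inputs` file must sit in the base
    # directory; Maestro and Castro read parameters from `job_info`.
    ds = yt.load("plt00000")
    print(ds.field_list)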
diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -198,10 +198,9 @@
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on Microsoft Windows is only supported for Windows XP Service Pack
-3 and higher (both 32-bit and 64-bit) using Anaconda, see
-:ref:`anaconda-installation`.  Also see :ref:`windows-developing` for details on
-how to build yt from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
+:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
+from source in Windows.
 
 .. _source-installation:
 

diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 doc/source/reference/field_list.rst
--- a/doc/source/reference/field_list.rst
+++ b/doc/source/reference/field_list.rst
@@ -853,9 +853,9 @@
               raise NeedsParameter("omega_baryon")
           co = data.ds.cosmology
           # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
-          # mean density(z) ~ omega_matter * (1 + z)^3
+          # mean matter density(z) ~ omega_matter * (1 + z)^3
           return data[ftype, "density"] / omega_baryon / co.critical_density(0.0) / \
-            (1.0 + data.ds.hubble_constant)**3
+            (1.0 + data.ds.current_redshift)**3
   
 
 ('gas', 'cell_mass')
@@ -1526,9 +1526,9 @@
           co = data.ds.cosmology
           # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
           # mean density(z) ~ omega_matter * (1 + z)^3
-          return data[ftype, "density"] / data.ds.omega_matter / \
+          return data[ftype, "matter_density"] / data.ds.omega_matter / \
             co.critical_density(0.0) / \
-            (1.0 + data.ds.hubble_constant)**3
+            (1.0 + data.ds.current_redshift)**3
   
 
 ('gas', 'mean_molecular_weight')
@@ -1747,7 +1747,11 @@
                                   "bulk_%s" % basename)
           theta = data['index', 'spherical_theta']
           phi   = data['index', 'spherical_phi']
-          return get_sph_r_component(vectors, theta, phi, normal)
+          rv = get_sph_r_component(vectors, theta, phi, normal)
+          # Now, anywhere that radius is in fact zero, we want to zero out our
+          # return values.
+          rv[np.isnan(theta)] = 0.0
+          return rv
   
 
 ('gas', 'radial_velocity_absolute')
@@ -1766,7 +1770,11 @@
                                   "bulk_%s" % basename)
           theta = data['index', 'spherical_theta']
           phi   = data['index', 'spherical_phi']
-          return get_sph_r_component(vectors, theta, phi, normal)
+          rv = get_sph_r_component(vectors, theta, phi, normal)
+          # Now, anywhere that radius is in fact zero, we want to zero out our
+          # return values.
+          rv[np.isnan(theta)] = 0.0
+          return rv
   
 
 ('gas', 'radiation_acceleration_x')
@@ -2702,12 +2710,8 @@
 .. code-block:: python
 
       def _cylindrical_r(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
   
 
@@ -2721,12 +2725,8 @@
 .. code-block:: python
 
       def _cylindrical_theta(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_cyl_theta(coords, normal)
   
 
@@ -2741,13 +2741,9 @@
 .. code-block:: python
 
       def _cylindrical_z(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = data.ds.arr(obtain_rvec(data), "code_length")
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
-          return get_cyl_z(coords, normal).in_cgs()
+          coords = get_periodic_rvec(data)
+          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
   
 
 ('index', 'disk_angle')
@@ -2903,12 +2899,8 @@
 .. code-block:: python
 
       def _spherical_phi(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_sph_phi(coords, normal)
   
 
@@ -2923,12 +2915,8 @@
 .. code-block:: python
 
       def _spherical_r(field, data):
-          center = data.get_field_parameter("center")
-          coords = data.ds.arr(obtain_rvec(data), "code_length")
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
-          return get_sph_r(coords).in_cgs()
+          coords = get_periodic_rvec(data)
+          return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
   
 
 ('index', 'spherical_theta')
@@ -2941,12 +2929,8 @@
 .. code-block:: python
 
       def _spherical_theta(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_sph_theta(coords, normal)
   
 
@@ -4025,6 +4009,603 @@
    * Units: :math:`\mathrm{\rm{code}~\rm{mass} / \rm{code}~\rm{time}}`
    * Particle Type: True
 
+.. _Castro_specific_fields:
+
+Castro-Specific Fields
+----------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'xmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_x``
+   * Particle Type: False
+
+('boxlib', 'ymom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_y``
+   * Particle Type: False
+
+('boxlib', 'zmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_z``
+   * Particle Type: False
+
+('boxlib', 'x_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'rho_E')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Aliased to: ``energy_density``
+   * Particle Type: False
+
+('boxlib', 'rho_e')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'Temp')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'grav_x')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'grav_y')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'grav_z')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'pressure')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'kineng')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'soundspeed')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``sound_speed``
+   * Particle Type: False
+
+('boxlib', 'Machnumber')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``mach_number``
+   * Particle Type: False
+
+('boxlib', 'entropy')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{K} \cdot \rm{g}}}`
+   * Aliased to: ``entropy``
+   * Particle Type: False
+
+('boxlib', 'magvort')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Aliased to: ``vorticity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'divu')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'eint_E')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'eint_e')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'magvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'radvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'magmom')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} \cdot \rm{g} / \rm{s}}`
+   * Aliased to: ``momentum_magnitude``
+   * Particle Type: False
+
+('boxlib', 'maggrav')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'phiGrav')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+.. _Maestro_specific_fields:
+
+Maestro-Specific Fields
+-----------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'x_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'magvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'radial_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'tfromp')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'tfromh')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'Machnumber')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``mach_number``
+   * Particle Type: False
+
+('boxlib', 'S')
+^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'ad_excess')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltaT')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltagamma')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltap')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'divw0')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'entropy')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{K} \cdot \rm{g}}}`
+   * Aliased to: ``entropy``
+   * Particle Type: False
+
+('boxlib', 'entropypert')
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'enucdot')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{g} \cdot \rm{s}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_x')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_y')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_z')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'h')
+^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'h0')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'momentum')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} \cdot \rm{g} / \rm{s}}`
+   * Aliased to: ``momentum_magnitude``
+   * Particle Type: False
+
+('boxlib', 'p0')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'p0pluspi')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'pi')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'pioverp0')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'rho0')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhoh')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Aliased to: ``enthalpy_density``
+   * Particle Type: False
+
+('boxlib', 'rhoh0')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhohpert')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhopert')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'soundspeed')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``sound_speed``
+   * Particle Type: False
+
+('boxlib', 'sponge')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'tpert')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'vort')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Aliased to: ``vorticity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'w0_x')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'w0_y')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'w0_z')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+.. _Orion_specific_fields:
+
+Orion-Specific Fields
+---------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'eden')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length} \cdot \rm{code}~\rm{time}^{2}}}`
+   * Aliased to: ``energy_density``
+   * Particle Type: False
+
+('boxlib', 'xmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_x``
+   * Particle Type: False
+
+('boxlib', 'ymom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_y``
+   * Particle Type: False
+
+('boxlib', 'zmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_z``
+   * Particle Type: False
+
+('boxlib', 'temperature')
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'Temp')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'x_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'xvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'yvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'zvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('io', 'particle_mass')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{mass}}`
+   * Particle Type: True
+
+('io', 'particle_position_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_position_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_position_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_id')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``particle_index``
+   * Particle Type: True
+
+('io', 'particle_mdot')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{mass} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
 .. _Enzo_specific_fields:
 
 Enzo-Specific Fields

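The aliases listed above are how yt maps frontend-specific disk names onto
its universal field system: ('boxlib', 'xmom') and friends resolve to the
same data as the generic momentum and velocity fields.  A minimal usage
sketch, assuming a hypothetical plotfile name ("plt00000" is a placeholder,
not taken from these commits):

import yt

ds = yt.load("plt00000")   # hypothetical boxlib plotfile name
ad = ds.all_data()

# The native on-disk name and its alias return the same array; the alias
# is assumed to live under the "gas" field type, per yt's usual convention.
native = ad["boxlib", "xmom"]
aliased = ad["gas", "momentum_x"]
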
diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -23,7 +23,9 @@
       MaestroDataset
 
 from .fields import \
-      BoxlibFieldInfo
+      BoxlibFieldInfo, \
+      MaestroFieldInfo, \
+      CastroFieldInfo
 
 from .io import \
       IOHandlerBoxlib

diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -432,6 +432,12 @@
         min_level = self.min_level or 0
         max_level = self.max_level or levels.max()
 
+        # sorts the three arrays in order of ascending level - this makes images look nicer
+        new_indices = np.argsort(levels)
+        levels = levels[new_indices]
+        GLE = GLE[new_indices]
+        GRE = GRE[new_indices]
+        
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * DW[px_index]
             pyo = py_off * DW[py_index]


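The new hunk above relies on NumPy fancy indexing to co-sort three parallel
arrays by a single key.  A standalone sketch of the idiom, with made-up
values (nothing here comes from the commit itself):

import numpy as np

levels = np.array([2, 0, 1])                           # level per grid
GLE = np.array([[0.4, 0.4], [0.0, 0.0], [0.2, 0.2]])   # grid left edges
GRE = GLE + 0.1                                        # grid right edges

order = np.argsort(levels)          # permutation that sorts coarse-first
levels, GLE, GRE = levels[order], GLE[order], GRE[order]
print(levels)   # [0 1 2] -- all three arrays stay aligned per grid

Coarse boxes are drawn first, so finer-level outlines land on top, which
is what the in-code comment means by making the images look nicer.
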
https://bitbucket.org/yt_analysis/yt/commits/4e0b61026b46/
Changeset:   4e0b61026b46
Branch:      yt
User:        atmyers
Date:        2014-09-03 02:04:49+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  61 files

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -500,13 +500,28 @@
     fi
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
+    BUILD_ARGS=""
+    case $LIB in
+        *h5py*)
+            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            ;;
+        *numpy*)
+            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            then
+                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                                                 import numpy; print SV(numpy.__version__) < SV("1.8.0")')
+                if [ $VER == "True" ]
+                then
+                    echo "Removing previous NumPy instance (see issue #889)"
+                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                fi
+            fi
+            ;;
+        *)
+            ;;
+    esac
     cd $LIB
-    if [ ! -z `echo $LIB | grep h5py` ]
-    then
-	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    else
-        ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    fi
+    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
@@ -726,7 +741,7 @@
         cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
-		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -1022,7 +1037,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/bootcamp/"
+    echo "    http://yt-project.org/doc/quickstart/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

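The numpy branch of that case statement shells out to an inline Python
check.  The comparison it performs is equivalent to this sketch (a
Python 2-era idiom; distutils.version is deprecated in modern Python):

from distutils.version import StrictVersion as SV
import numpy

# True when the installed NumPy predates 1.8.0; the install script then
# removes the old tree before rebuilding (see issue #889 in the hunk).
needs_removal = SV(numpy.__version__) < SV("1.8.0")
print(needs_removal)
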
diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -37,7 +37,7 @@
 .. note::
 
    The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
+   http://yt-project.org/data.  See :ref:`quickstart-introduction` for more
    details.
 
 Let us know if you would like to contribute other example notebooks, or have

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/1)_Introduction.ipynb
--- a/doc/source/bootcamp/1)_Introduction.ipynb
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:39620670ce7751b23f30d2123fd3598de1c7843331f65de13e29f4ae9f759e0f"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Welcome to the yt bootcamp!\n",
-      "\n",
-      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
-      "\n",
-      "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook.  The documentation exists at http://yt-project.org/doc/.  If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.\n",
-      "\n",
-      "## Acquiring the datasets for this tutorial\n",
-      "\n",
-      "If you are executing these tutorials interactively, you need some sample datasets on which to run the code.  You can download these datasets at http://yt-project.org/data/.  The datasets necessary for each lesson are noted next to the corresponding tutorial.\n",
-      "\n",
-      "## What's Next?\n",
-      "\n",
-      "The Notebooks are meant to be explored in this order:\n",
-      "\n",
-      "1. Introduction\n",
-      "2. Data Inspection (IsolatedGalaxy dataset)\n",
-      "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n",
-      "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n",
-      "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
-      "6. Volume Rendering (IsolatedGalaxy dataset)"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "download_datasets = False\n",
-      "if download_datasets:\n",
-      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
-      "    print \"Got enzo_tiny_cosmology\"\n",
-      "    !tar xf enzo_tiny_cosmology.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
-      "    print \"Got Enzo_64\"\n",
-      "    !tar xf Enzo_64.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
-      "    print \"Got IsolatedGalaxy\"\n",
-      "    !tar xf IsolatedGalaxy.tar\n",
-      "    \n",
-      "    print \"All done!\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ /dev/null
@@ -1,384 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a8fe78715c1f3900c37c675d84320fe65f0ba8734abba60fd12e74d957e5d8ee"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Starting Out and Loading Data\n",
-      "\n",
-      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now that we've loaded yt, we can load up some data.  Let's load the `IsolatedGalaxy` dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Fields and Facts\n",
-      "\n",
-      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"index\" it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.print_stats()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also tell you the fields it found on disk:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, all of the fields it thinks it knows how to generate:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.derived_field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also transparently generate fields.  However, we encourage you to examine exactly what yt is doing when it generates those fields.  To see, you can ask for the source of a given field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.field_info[\"gas\", \"vorticity_x\"].get_source()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt stores information about the domain of the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also convert this into various units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width.in_units(\"kpc\")\n",
-      "print ds.domain_width.in_units(\"au\")\n",
-      "print ds.domain_width.in_units(\"mile\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Mesh Structure\n",
-      "\n",
-      "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grid_left_edge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The index (`ds.index` here) has an attribute `grids` which is all of the grid objects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grids[1]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g = ds.index.grids[1]\n",
-      "print g"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Grids have dimensions, extents, level, and even a list of Child grids."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.ActiveDimensions"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.LeftEdge, g.RightEdge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Level"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Children"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Advanced Grid Inspection\n",
-      "\n",
-      "If we want to examine grids only at a given level, we can!  Not only that, but we can load data and take a look at various fields.\n",
-      "\n",
-      "*This section can be skipped!*"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "gs = ds.index.select_grids(ds.index.max_level)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g2 = gs[0]\n",
-      "print g2\n",
-      "print g2.Parent\n",
-      "print g2.get_global_startindex()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print g2[\"density\"][:,:,0]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print (g2.Parent.child_mask == 0).sum() * 8\n",
-      "print g2.ActiveDimensions.prod()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "for f in ds.field_list:\n",
-      "    fv = g[f]\n",
-      "    if fv.size == 0: continue\n",
-      "    print f, fv.min(), fv.max()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Examining Data in Regions\n",
-      "\n",
-      "yt provides data object selectors.  In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it.  yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n",
-      "\n",
-      "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10, 'kpc'))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can calculate a bunch of bulk quantities.  Here's that list, but there's a list in the docs, too!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Let's look at the total mass.  This is how you call a given quantity.  yt calls these \"Derived Quantities\".  We'll talk about a few in a later notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.total_mass()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ /dev/null
@@ -1,275 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Simple Visualizations of Data\n",
-      "\n",
-      "Just like in our first notebook, we have to load yt and then some data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "For this notebook, we'll load up a cosmology dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "print \"Redshift =\", ds.current_redshift"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the terms that yt uses, a projection is a line integral through the domain.  This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned.  Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you.  However, we also provide a simple method of creating Projections and plotting them in a single step.  This is called a Plot Window, here specifically known as a `ProjectionPlot`.  One thing to note is that in yt, we project all the way through the entire domain at a single time.  This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n",
-      "\n",
-      "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly.  The cookbook in the documentation includes detailed examples of this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be pre-prended to the filename and can be used to name it based on the width or to supply a location.\n",
-      "\n",
-      "Now we'll zoom and pan a bit."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(2.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((0.1, 0.0))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((-0.25, -0.5))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(0.1)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we specify multiple fields, each time we call `show` we get multiple plots back.  Same for `save`!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the colormap on a field-by-field basis."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.set_cmap(\"temperature\", \"hot\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, we can re-center the plot on different locations.  One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "v, c = ds.find_max(\"density\")\n",
-      "p.set_center((c[0], c[1]))\n",
-      "p.zoom(10)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
-      "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
-      "s.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the logging of various fields:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.set_log(\"velocity_magnitude\", True)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides many different annotations for your plots.  You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here.  We'll annotate with velocity arrows."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.annotate_velocity()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Contours can also be overlaid:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
-      "s.annotate_contour(\"temperature\")\n",
-      "s.zoom(2.5)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, we can save out to the file system."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.save()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ /dev/null
@@ -1,382 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a46e1baa90d32045c2b524100f28bad41b3665249612c9a275ee0375a6f4be20"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Data Objects and Time Series Data\n",
-      "\n",
-      "Just like before, we will load up yt.  Since we'll be using pylab to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from matplotlib import pylab\n",
-      "from yt.analysis_modules.halo_finding.api import HaloFinder"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Time Series Data\n",
-      "\n",
-      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `DatasetSeries` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
-      "\n",
-      "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ts = yt.DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 1: Simple Time Series\n",
-      "\n",
-      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for ds in ts` where `ds` means \"Dataset\" and `ts` is the \"Time Series\" we just loaded up.  For each dataset, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the `extrema` Derived Quantity, and append the min and max to our extrema outputs."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "rho_ex = []\n",
-      "times = []\n",
-      "for ds in ts:\n",
-      "    dd = ds.all_data()\n",
-      "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
-      "rho_ex = np.array(rho_ex)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the minimum and the maximum:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n",
-      "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n",
-      "pylab.ylabel(\"Density ($g/cm^3$)\")\n",
-      "pylab.xlabel(\"Time (Gyr)\")\n",
-      "pylab.legend()\n",
-      "pylab.ylim(1e-32, 1e-21)\n",
-      "pylab.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 2: Advanced Time Series\n",
-      "\n",
-      "Let's do something a bit different.  Let's calculate the total mass inside halos and outside halos.\n",
-      "\n",
-      "This actually touches a lot of different pieces of machinery in yt.  For every dataset, we will run the halo finder HOP.  Then, we calculate the total mass in the domain.  Then, for each halo, we calculate the sum of the baryon mass in that halo.  We'll keep running tallies of these two things."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units import Msun\n",
-      "\n",
-      "mass = []\n",
-      "zs = []\n",
-      "for ds in ts:\n",
-      "    halos = HaloFinder(ds)\n",
-      "    dd = ds.all_data()\n",
-      "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    total_in_baryons = 0.0*Msun\n",
-      "    for halo in halos:\n",
-      "        sp = halo.get_sphere()\n",
-      "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    mass.append(total_in_baryons/total_mass)\n",
-      "    zs.append(ds.current_redshift)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now let's plot them!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogx(zs, mass, '-xb')\n",
-      "pylab.xlabel(\"Redshift\")\n",
-      "pylab.ylabel(\"Mass in halos / Total mass\")\n",
-      "pylab.xlim(max(zs), min(zs))\n",
-      "pylab.ylim(-0.01, .18)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Data Objects\n",
-      "\n",
-      "Time series data have many applications, but most of them rely on examining the underlying data in some way.  Below, we'll see how to use and manipulate data objects.\n",
-      "\n",
-      "### Ray Queries\n",
-      "\n",
-      "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
-      "\n",
-      "To create a ray, we specify the start and end points.\n",
-      "\n",
-      "Note that we need to convert these arrays to numpy arrays due to a bug in matplotlib 1.3.1."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
-      "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"dts\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"t\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"x\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Slice Queries\n",
-      "\n",
-      "While slices are often used for visualization, they can be useful for other operations as well.  yt regards slices as multi-resolution objects.  They are an array of cells that are not all the same size; it only returns the cells at the highest resolution that it intersects.  (This is true for all yt data objects.)  Slices and projections have the special fields `px`, `py`, `pdx` and `pdy`, which correspond to the coordinates and half-widths in the pixel plane."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "v, c = ds.find_max(\"density\")\n",
-      "sl = ds.slice(0, c[0])\n",
-      "print sl[\"index\", \"x\"]\n",
-      "print sl[\"index\", \"z\"]\n",
-      "print sl[\"pdx\"]\n",
-      "print sl[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to do something interesting with a `Slice`, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "frb = sl.to_frb((50.0, 'kpc'), 1024)\n",
-      "print frb[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of `density`, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "yt.write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
-      "from IPython.display import Image\n",
-      "Image(filename = \"temp.png\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Off-Axis Slices\n",
-      "\n",
-      "yt provides not only slices, but off-axis slices that are sometimes called \"cutting planes.\"  These are specified by (in order) a normal vector and a center.  Here we've set the normal vector to `[0.2, 0.3, 0.5]` and the center to be the point of maximum density.\n",
-      "\n",
-      "We can then turn these directly into plot windows using `to_pw`.  Note that the `to_pw` and `to_frb` methods are available on slices, off-axis slices, and projections, and can be used on any of them."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
-      "pw = cp.to_pw(fields = [(\"gas\", \"density\")])"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Once we have our plot window from our cutting plane, we can show it here."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pw.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can, as noted above, do the same with our slice:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pws = sl.to_pw(fields=[\"density\"])\n",
-      "#pws.show()\n",
-      "print pws.plots.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Covering Grids\n",
-      "\n",
-      "If we want to access a 3D array of data that spans multiple resolutions in our simulation, we can use a covering grid.  This will return a 3D array of data, drawing from up to the resolution level specified when creating the data.  For example, if you create a covering grid that spans two child grids of a single parent grid, it will fill those zones covered by a zone of a child grid with the data from that child grid.  Where it is covered only by the parent grid, the cells from the parent grid will be duplicated (appropriately) to fill the covering grid.\n",
-      "\n",
-      "There are two different types of covering grids: unsmoothed and smoothed.  Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc.  This will help to reduce edge effects.  Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n",
-      "\n",
-      "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2.  We can then ask for the Density field, which will be a 3D array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cg = ds.covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print cg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In this example, we do exactly the same thing: except we ask for a *smoothed* covering grid, which will reduce edge effects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "scg = ds.smoothed_covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print scg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ /dev/null
@@ -1,254 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Derived Fields and Profiles\n",
-      "\n",
-      "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk.  This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used.  Additionally, you can create them by just writing python functions."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from yt import derived_field\n",
-      "from matplotlib import pylab"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Derived Fields\n",
-      "\n",
-      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `dinosaurs` and our units are `K*cm/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "@derived_field(name = \"dinosaurs\", units = \"K * cm/s\")\n",
-      "def _dinos(field, data):\n",
-      "    return data[\"temperature\"] * data[\"velocity_magnitude\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One important thing to note is that derived fields must be defined *before* any datasets are loaded.  Let's load up our data and take a look at some quantities."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "dd = ds.all_data()\n",
-      "print dd.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `extrema` quantity -- the exact same way that we would for density, temperature, and so on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.extrema(\"dinosaurs\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can do the same for the average quantities as well."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.weighted_average_quantity(\"dinosaurs\", weight=\"temperature\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## A Few Other Quantities\n",
-      "\n",
-      "We can ask other quantities of our data, as well.  For instance, this sequence of operations will find the most dense point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema.  All of this is done in a memory conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions and then a final reduction to calculate your quantity."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10.0, 'kpc'))\n",
-      "bv = sp.quantities.bulk_velocity()\n",
-      "L = sp.quantities.angular_momentum_vector()\n",
-      "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
-      "print bv\n",
-      "print L\n",
-      "print rho_min, rho_max"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Profiles\n",
-      "\n",
-      "yt provides the ability to bin in 1, 2 and 3 dimensions.  This means discretizing in one or more dimensions of phase space (density, temperature, etc) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n",
-      "\n",
-      "We do this using the objects `Profile1D`, `Profile2D`, and `Profile3D`.  The first two are the most common since they are the easiest to visualize.\n",
-      "\n",
-      "This first set of commands manually creates a profile object the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `temperature` and (previously-defined) `dinosaurs`.  We then plot it in a loglog plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
-      "prof.add_fields([\"temperature\",\"dinosaurs\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Temperature $(K)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the `dinosaurs` field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Dinosaurs $(K cm / s)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to see the total mass in every bin, we profile the `cell_mass` field with no weight.  Specifying `weight=None` will simply take the total value in every bin and add that up."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
-      "prof.add_fields([\"cell_mass\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Cell mass $(M_\\odot)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class.  Let's redo the last plot using `ProfilePlot`"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n",
-      "prof.set_unit('cell_mass', 'Msun')\n",
-      "prof.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Field Parameters\n",
-      "\n",
-      "Field parameters are a method of passing information to derived fields.  For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation.  yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off.  Here we show how that works:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp_small = ds.sphere(\"max\", (50.0, 'kpc'))\n",
-      "bv = sp_small.quantities.bulk_velocity()\n",
-      "\n",
-      "sp = ds.sphere(\"max\", (0.1, 'Mpc'))\n",
-      "rv1 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "sp.clear_data()\n",
-      "sp.set_field_parameter(\"bulk_velocity\", bv)\n",
-      "rv2 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "print bv\n",
-      "print rv1\n",
-      "print rv2"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/6)_Volume_Rendering.ipynb
--- a/doc/source/bootcamp/6)_Volume_Rendering.ipynb
+++ /dev/null
@@ -1,96 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:2a24bbe82955f9d948b39cbd1b1302968ff57f62f73afb2c7a5c4953393d00ae"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# A Brief Demo of Volume Rendering\n",
-      "\n",
-      "This shows a small amount of volume rendering.  Really, just enough to get your feet wet!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To create a volume rendering, we need a camera and a transfer function.  We'll use the `ColorTransferFunction`, which accepts (in log space) the minimum and maximum bounds of our transfer function.  This means behavior for data outside these values is undefined.\n",
-      "\n",
-      "We then add on \"layers\" like an onion.  This function can accept a width (here specified) in data units, and also a color map.  Here we add on four layers.\n",
-      "\n",
-      "Finally, we create a camera.  The focal point is `[0.5, 0.5, 0.5]`, the width is 20 kpc (including front-to-back integration) and we specify a transfer function.  Once we've done that, we call `show` to actually cast our rays and display them inline."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -24))\n",
-      "tf.add_layers(4, w=0.01)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
-      "cam.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cam.show(clip_ratio=4)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "There are several other options we can specify.  Note that here we have turned on the use of ghost zones, shortened the data interval for the transfer function, and widened our gaussian layers."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -25))\n",
-      "tf.add_layers(4, w=0.03)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
-      "cam.show(clip_ratio=4.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/data_inspection.rst
--- a/doc/source/bootcamp/data_inspection.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _data_inspection:
-
-Data Inspection
----------------
-
-.. notebook:: 2)_Data_Inspection.ipynb

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/data_objects_and_time_series.rst
--- a/doc/source/bootcamp/data_objects_and_time_series.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Data Objects and Time Series
-----------------------------
-
-.. notebook:: 4)_Data_Objects_and_Time_Series.ipynb

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/derived_fields_and_profiles.rst
--- a/doc/source/bootcamp/derived_fields_and_profiles.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Derived Fields and Profiles
----------------------------
-
-.. notebook:: 5)_Derived_Fields_and_Profiles.ipynb

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/index.rst
--- a/doc/source/bootcamp/index.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. _bootcamp:
-
-yt Bootcamp
-===========
-
-The bootcamp is a series of worked examples of how to use much of the
-functionality of yt.  These are simple, short introductions to give you a taste
-of what the code can do and are not meant to be detailed walkthroughs.
-
-There are two ways in which you can go through the bootcamp: interactively and 
-non-interactively.  We recommend the interactive method, but if you're pressed 
-on time, you can non-interactively go through the linked pages below and view the 
-worked examples.
-
-To execute the bootcamp interactively, you need to download the repository and
-start the IPython notebook.  If you do not already have the yt repository, the
-easiest way to get the repository is to clone it using mercurial:
-
-.. code-block:: bash
-
-   hg clone https://bitbucket.org/yt_analysis/yt
-
-Now start the IPython notebook from within the repository:
-
-.. code-block:: bash
-
-   cd yt/doc/source/bootcamp
-   yt notebook
-
-This command will give you information about the notebook server and how to
-access it.  You will basically just pick a password (for security reasons) and then 
-redirect your web browser to point to the notebook server.
-Once you have done so, choose "Introduction" from the list of
-notebooks, which includes an introduction and information about how to download
-the sample data.
-
-.. warning:: The pre-filled out notebooks are *far* less fun than running them
-             yourselves!  Check out the repo and give it a try.
-
-Here are the notebooks, which have been filled in for inspection:
-
-.. toctree::
-   :maxdepth: 1
-
-   introduction
-   data_inspection
-   simple_visualization
-   data_objects_and_time_series
-   derived_fields_and_profiles
-   volume_rendering
-
-.. note::
-
-   The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
-   details.
-
-Let us know if you would like to contribute other example notebooks, or have
-any suggestions for how these can be improved.

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/introduction.rst
--- a/doc/source/bootcamp/introduction.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _bootcamp-introduction:
-
-Introduction
-------------
-
-.. notebook:: 1)_Introduction.ipynb

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/simple_visualization.rst
--- a/doc/source/bootcamp/simple_visualization.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Simple Visualization
---------------------
-
-.. notebook:: 3)_Simple_Visualization.ipynb

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/bootcamp/volume_rendering.rst
--- a/doc/source/bootcamp/volume_rendering.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Volume Rendering
-----------------
-
-.. notebook:: 6)_Volume_Rendering.ipynb

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -122,7 +122,7 @@
     bootswatch_theme = "readable",
     navbar_links = [
         ("How to get help", "help/index"),
-        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Quickstart notebooks", "quickstart/index"),
         ("Cookbook", "cookbook/index"),
         ],
     navbar_sidebarrel = False,

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -90,3 +90,14 @@
 See :ref:`filtering-particles` for more information.
 
 .. yt_cookbook:: particle_filter_sfr.py
+
+Making a Turbulent Kinetic Energy Power Spectrum
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe shows how to use `yt` to read data, resample it onto a
+uniform grid that the NumPy FFT routines can operate on, and compute a
+turbulent kinetic energy power spectrum.  (Note: the dataset used here
+has low resolution, so the turbulence is not well developed.  The spike
+at high wavenumbers is due to non-periodicity in the z-direction.)
+
+.. yt_cookbook:: power_spectrum_example.py
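
The key step in the recipe is resampling the AMR data onto a uniform grid
before handing it to NumPy.  A minimal sketch of that handoff (the dataset
path is a placeholder, and only level 0 is used here; the recipe itself
resolves to the finest level):

.. code-block:: python

   import numpy as np
   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

   # Resample level-0 data onto a uniform grid covering the whole domain.
   cube = ds.covering_grid(0, left_edge=ds.domain_left_edge,
                           dims=ds.domain_dimensions)

   # FFT one velocity component; for real input the positive-frequency
   # modes live in the first half of each axis.
   vx = cube["gas", "velocity_x"].d
   nx, ny, nz = vx.shape
   vk = np.fft.fftn(vx)[:nx//2 + 1, :ny//2 + 1, :nz//2 + 1]
   power = np.abs(vk)**2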

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/cookbook/custom_colorbar_tickmarks.rst
--- a/doc/source/cookbook/custom_colorbar_tickmarks.rst
+++ b/doc/source/cookbook/custom_colorbar_tickmarks.rst
@@ -1,4 +1,4 @@
-Custom Colorabar Tickmarks
---------------------------
+Custom Colorbar Tickmarks
+-------------------------
 
 .. notebook:: custom_colorbar_tickmarks.ipynb

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/cookbook/power_spectrum_example.py
--- /dev/null
+++ b/doc/source/cookbook/power_spectrum_example.py
@@ -0,0 +1,118 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import yt
+
+"""
+Make a turbulent KE power spectrum.  Since the simulation is
+stratified, we apply a rho**(1/3) weighting to the velocity to get
+something that would look Kolmogorov (if the turbulence were fully
+developed).
+
+Ultimately, we aim to compute
+
+    E(k) = integral of (1/2) Vhat(k) . Vhat*(k) dS,
+
+where V = rho**n U is the density-weighted velocity field and Vhat is
+the FFT of V.
+
+(Note: sometimes we normalize by 1/volume to get a spectral energy
+density instead.)
+
+
+"""
+ 
+
+def doit(ds):
+
+    # an FFT operates on uniformly gridded data.  We'll use the yt
+    # covering grid for this.
+
+    max_level = ds.index.max_level
+
+    ref = int(np.product(ds.ref_factors[0:max_level]))
+
+    low = ds.domain_left_edge
+    dims = ds.domain_dimensions*ref
+
+    nx, ny, nz = dims
+
+    nindex_rho = 1./3.
+
+    Kk = np.zeros( (nx/2+1, ny/2+1, nz/2+1))
+
+    for vel in [("gas", "velocity_x"), ("gas", "velocity_y"), 
+                ("gas", "velocity_z")]:
+
+        Kk += 0.5*fft_comp(ds, ("gas", "density"), vel,
+                           nindex_rho, max_level, low, dims)
+
+    # wavenumbers
+    L = (ds.domain_right_edge - ds.domain_left_edge).d
+
+    kx = np.fft.rfftfreq(nx)*nx/L[0]
+    ky = np.fft.rfftfreq(ny)*ny/L[1]
+    kz = np.fft.rfftfreq(nz)*nz/L[2]
+    
+    # physical limits to the wavenumbers
+    kmin = np.min(1.0/L)
+    kmax = np.max(0.5*dims/L)
+    
+    kbins = np.arange(kmin, kmax, kmin)
+    N = len(kbins)
+
+    # bin the Fourier KE into radial kbins
+    kx3d, ky3d, kz3d = np.meshgrid(kx, ky, kz, indexing="ij")
+    k = np.sqrt(kx3d**2 + ky3d**2 + kz3d**2)
+
+    whichbin = np.digitize(k.flat, kbins)
+    ncount = np.bincount(whichbin)
+    
+    E_spectrum = np.zeros(len(ncount)-1)
+
+    for n in range(1,len(ncount)):
+        E_spectrum[n-1] = np.sum(Kk.flat[whichbin==n])
+
+    k = 0.5*(kbins[0:N-1] + kbins[1:N])
+    E_spectrum = E_spectrum[1:N]
+
+    index = np.argmax(E_spectrum)
+    kmax = k[index]
+    Emax = E_spectrum[index]
+
+    plt.loglog(k, E_spectrum)
+    plt.loglog(k, Emax*(k/kmax)**(-5./3.), ls=":", color="0.5")
+
+    plt.xlabel(r"$k$")
+    plt.ylabel(r"$E(k)dk$")
+
+    plt.savefig("spectrum.png")
+
+
+def fft_comp(ds, irho, iu, nindex_rho, level, low, delta):
+
+    cube = ds.covering_grid(level, left_edge=low,
+                            dims=delta,
+                            fields=[irho, iu])
+
+    rho = cube[irho].d
+    u = cube[iu].d
+
+    nx, ny, nz = rho.shape
+
+    # do the FFTs -- note that since our data is real, there will be
+    # too much information here.  fftn puts the positive freq terms in
+    # the first half of the axes -- that's what we keep.  Our
+    # normalization has an '8' to account for this clipping to one
+    # octant.
+    ru = np.fft.fftn(rho**nindex_rho * u)[0:nx/2+1,0:ny/2+1,0:nz/2+1]
+    ru = 8.0*ru/(nx*ny*nz)
+
+    return np.abs(ru)**2
+
+
+if __name__ == "__main__":
+
+    ds = yt.load("maestro_xrb_lores_23437")
+    doit(ds)
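
The normalization and binning above can be sanity-checked independently of
yt.  Parseval's theorem says sum(|uhat|**2)/N equals sum(u**2) for
uhat = fftn(u) with N = u.size, and the same digitize-based radial binning
used in doit() can be exercised on a random cube (illustrative values only):

.. code-block:: python

   import numpy as np

   u = np.random.rand(32, 32, 32)
   uhat = np.fft.fftn(u)

   # Parseval check on the raw FFT normalization.
   assert np.allclose(np.sum(np.abs(uhat)**2) / u.size, np.sum(u**2))

   # Radially bin the 3-D power into a 1-D spectrum, as in doit().
   kfreq = np.fft.fftfreq(32) * 32
   kx, ky, kz = np.meshgrid(kfreq, kfreq, kfreq, indexing="ij")
   k = np.sqrt(kx**2 + ky**2 + kz**2)
   kbins = np.arange(0.5, 16.5, 1.0)
   whichbin = np.digitize(k.flat, kbins)
   power = np.abs(uhat)**2
   spectrum = np.array([power.flat[whichbin == i].sum()
                        for i in range(1, len(kbins))])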

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -28,7 +28,7 @@
 * Analyzing
 * Examining
 * Cookbook
-* Bootcamp
+* Quickstart
 * Developing
 * Reference
 * Help

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -12,7 +12,7 @@
           based simulations.  For now, these are represented as patches, with
           the attendant properties.
 
-For a more basic introduction, see :ref:`bootcamp` and more specifically
+For a more basic introduction, see :ref:`quickstart` and more specifically
 :ref:`data_inspection`.
 
 .. _examining-grid-hierarchies:

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -34,7 +34,7 @@
      <tr valign="top"><td width="25%"><p>
-           <a href="bootcamp/index.html">yt Bootcamp</a>
+           <a href="quickstart/index.html">yt Quickstart</a></p></td><td width="75%">
@@ -127,7 +127,7 @@
    :hidden:
 
    installing
-   yt Bootcamp <bootcamp/index>
+   yt Quickstart <quickstart/index>
    yt3differences
    cookbook/index
    visualizing/index

diff -r 06c705a621112cb3a2f2e670089ee5f74f0e2be2 -r 4e0b61026b46134ec92ed06f2f7fd1cef7871636 doc/source/quickstart/1)_Introduction.ipynb
--- /dev/null
+++ b/doc/source/quickstart/1)_Introduction.ipynb
@@ -0,0 +1,72 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:7c68cdd34ce71c042fa3c4badc4587693f1cc1b6aa0b3c99a4a63a1db6fe57f9"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Welcome to the yt quickstart!\n",
+      "\n",
+      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
+      "\n",
+      "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook.  The documentation exists at http://yt-project.org/doc/.  If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.\n",
+      "\n",
+      "## Acquiring the datasets for this tutorial\n",
+      "\n",
+      "If you are executing these tutorials interactively, you need some sample datasets on which to run the code.  You can download these datasets at http://yt-project.org/data/.  The datasets necessary for each lesson are noted next to the corresponding tutorial.\n",
+      "\n",
+      "## What's Next?\n",
+      "\n",
+      "The Notebooks are meant to be explored in this order:\n",
+      "\n",
+      "1. Introduction\n",
+      "2. Data Inspection (IsolatedGalaxy dataset)\n",
+      "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n",
+      "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n",
+      "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
+      "6. Volume Rendering (IsolatedGalaxy dataset)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time, so please wait while the kernel is busy. You will need to set `download_datasets` to True before using it."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "download_datasets = False\n",
+      "if download_datasets:\n",
+      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
+      "    print \"Got enzo_tiny_cosmology\"\n",
+      "    !tar xf enzo_tiny_cosmology.tar\n",
+      "    \n",
+      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
+      "    print \"Got Enzo_64\"\n",
+      "    !tar xf Enzo_64.tar\n",
+      "    \n",
+      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
+      "    print \"Got IsolatedGalaxy\"\n",
+      "    !tar xf IsolatedGalaxy.tar\n",
+      "    \n",
+      "    print \"All done!\""
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b0658872803e/
Changeset:   b0658872803e
Branch:      yt
User:        atmyers
Date:        2014-09-03 03:58:12+00:00
Summary:     reverting out particle plot changes for now
Affected #:  3 files

diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r b0658872803e108385ece9c90d9715673f1b562f yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -143,7 +143,7 @@
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, ProfilePlot, PhasePlot, ParticlePlot
+    show_colormaps, ProfilePlot, PhasePlot
 
 from yt.visualization.volume_rendering.api import \
     off_axis_projection, ColorTransferFunction, \

diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r b0658872803e108385ece9c90d9715673f1b562f yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -52,9 +52,6 @@
     ProfilePlot, \
     PhasePlot
 
-from .particle_plotter import \
-    ParticlePlot
-    
 from .base_plot_types import \
     get_multi_plot
 

diff -r b5403ca197e73e17b07b5575424e1bdb657e0b10 -r b0658872803e108385ece9c90d9715673f1b562f yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ /dev/null
@@ -1,370 +0,0 @@
-"""
-This is a simple mechanism for interfacing with Particle plots
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-
-import __builtin__
-import base64
-import types
-
-from functools import wraps
-from itertools import izip
-import matplotlib
-import numpy as np
-import cStringIO
-
-from yt.utilities.exceptions import \
-    YTNotInsideNotebook
-from yt.utilities.logger import ytLogger as mylog
-import _mpl_imports as mpl
-from yt.funcs import \
-    ensure_list, \
-    get_image_suffix, \
-    get_ipython_api_version
-from yt.units.unit_object import Unit
-from .profile_plotter import \
-    get_canvas, \
-    invalidate_plot, \
-    sanitize_label
-
-class ParticlePlot(object):
-    r"""
-    Create a particle scatter plot from a data source.
-
-    Given a data object (all_data, region, sphere, etc.), an x field, 
-    and a y field (both of particle type), this will create a scatter
-    plot with one marker for each particle.
-
-    Parameters
-    ----------
-    data_source : AMR3DData Object
-        The data object to be plotted, such as all_data, region, or
-        sphere.
-    x_field : str
-        The field to plot on the x-axis.
-    y_field : str
-        The field to plot on the y-axis.
-    plot_spec : dict or list of dicts
-        A dictionary or list of dictionaries containing plot keyword 
-        arguments.  This will be passed to pyplot.plot. 
-        For example, dict(c='r', marker='.').
-        Default: dict(c='b', marker='.', linestyle='None', markersize=8)
-
-    Examples
-    --------
-
-    >>> import yt
-    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-    >>> ad = ds.all_data()
-    >>> plot = yt.ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x')
-    >>> plot.save()
-
-    Use set_line_property to change line properties.
-    
-    """
-    x_log = None
-    y_log = None
-    z_log = None
-    x_title = None
-    y_title = None
-    x_lim = (None, None)
-    y_lim = (None, None)
-    _plot_valid = False
-
-    def __init__(self, data_source, x_field, y_field,
-                 label=None, plot_spec=None):
-
-        if plot_spec is None:
-            plot_spec = {'c':'b', 'marker':'.', 'linestyle':'None', 'markersize':8}
-
-        self.data_source = data_source
-        self.x_field = x_field
-        self.y_field = y_field
-        self.label = sanitize_label(label, 1)
-        self.plot_spec = plot_spec
-
-        self.x_data = self.data_source[x_field]
-        self.y_data = self.data_source[y_field]
-        
-        self.figure = mpl.matplotlib.figure.Figure((10, 8))
-        self.axis = self.figure.add_subplot(111)
-        self._setup_plots()
-
-    def save(self, name=None):
-        r"""
-         Saves the scatter plot to disk.
-
-         Parameters
-         ----------
-         name : str
-             The output file keyword.
-
-         """
-        if not self._plot_valid:
-            self._setup_plots()
-        if name is None:
-            prefix = self.data_source.ds
-            name = "%s.png" % prefix
-        suffix = get_image_suffix(name)
-        prefix = name[:name.rfind(suffix)]
-        xfn = self.x_field
-        if isinstance(xfn, types.TupleType):
-            xfn = xfn[1]
-        yfn = self.y_field
-        if isinstance(yfn, types.TupleType):
-            yfn = yfn[1]
-        if not suffix:
-            suffix = ".png"
-        canvas_cls = get_canvas(name)
-        canvas = canvas_cls(self.figure)
-        fn = "%s_ScatterPlot_%s_%s%s" % (prefix, xfn, yfn, suffix)
-        mylog.info("Saving %s", fn)
-        canvas.print_figure(fn)
-        return fn
-
-    def show(self):
-        r"""This will send the plot to the IPython notebook.
-
-        If yt is being run from within an IPython session, and it is able to
-        determine this, this function will send the plot to the
-        notebook for display.
-
-        If yt can't determine if it's inside an IPython session, it will raise
-        YTNotInsideNotebook.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_position_y')
-        >>> pp.show()
-
-        """
-        if "__IPYTHON__" in dir(__builtin__):
-            api_version = get_ipython_api_version()
-            if api_version in ('0.10', '0.11'):
-                self._send_zmq()
-            else:
-                from IPython.display import display
-                display(self)
-        else:
-            raise YTNotInsideNotebook
-
-    def _repr_html_(self):
-        """Return an html representation of the plot object: the figure
-        rendered as an inline png."""
-        ret = ''
-        canvas = mpl.FigureCanvasAgg(self.figure)
-        f = cStringIO.StringIO()
-        canvas.print_figure(f)
-        f.seek(0)
-        img = base64.b64encode(f.read())
-        ret += '<img src="data:image/png;base64,%s"><br>' % img
-        return ret
-
-    def _setup_plots(self):
-        self.axis.cla()
-        self.axis.plot(np.array(self.x_data), np.array(self.y_data),
-                       label=self.label, **self.plot_spec)
-
-        xscale, yscale = self._get_axis_log()
-        xtitle, ytitle = self._get_axis_titles()
-
-        self.axis.set_xscale(xscale)
-        self.axis.set_yscale(yscale)
-
-        self.axis.set_xlabel(xtitle)
-        self.axis.set_ylabel(ytitle)
-
-        self.axis.set_xlim(*self.x_lim)
-        self.axis.set_ylim(*self.y_lim)
-
-        if any(self.label):
-            self.axis.legend(loc="best")
-
-        self._plot_valid = True
-
-    @invalidate_plot
-    def set_line_property(self, property, value):
-        r"""
-        Set properties for the line on the plot.
-
-        Parameters
-        ----------
-        property : str
-            The line property to be set.
-        value : str, int, float
-            The value to set for the line property.
-
-        Examples
-        --------
-
-        plot.set_line_property("marker", "+")
-
-        
-        """
-        specs = self.plot_spec
-        specs[property] = value
-        return self
-
-    @invalidate_plot
-    def set_xlog(self, log):
-        """set the x-axis to log or linear.
-
-        Parameters
-        ----------
-
-        log : boolean
-            Log on/off.
-        """
-        self.x_log = log
-        return self
-
-    @invalidate_plot
-    def set_ylog(self, log):
-        """set the y-axis to log or linear.
-
-        Parameters
-        ----------
-
-        log : boolean
-            Log on/off.
-        """
-        self.y_log = log
-        return self
-    
-
-    @invalidate_plot
-    def set_unit(self, field, unit):
-        """Sets a new unit for the requested field
-
-        Parameters
-        ----------
-        field : string
-           The name of the field that is to be changed.
-
-        unit : string or Unit object
-           The name of the new unit.
-        """
-        if field == self.x_field:
-            self.x_data.convert_to_units(unit)
-        elif field == self.y_field:
-            self.y_data.convert_to_units(unit)
-        else:
-            raise KeyError("Field %s not in the plot!" % (field))
-        return self
-
-    @invalidate_plot
-    def set_xlim(self, xmin=None, xmax=None):
-        """Sets the limits of the x field
-
-        Parameters
-        ----------
-        
-        xmin : float or None
-          The new x minimum.  Defaults to None, which leaves the xmin
-          unchanged.
-
-        xmax : float or None
-          The new x maximum.  Defaults to None, which leaves the xmax
-          unchanged.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
-        >>> pp.set_xlim(0.1, 0.9)
-        >>> pp.save()
-
-        """
-        self.x_lim = (xmin, xmax)
-        return self
-
-    @invalidate_plot
-    def set_ylim(self, ymin=None, ymax=None):
-        """Sets the limits for the y-axis of the plot.
-
-        Parameters
-        ----------
-
-        ymin : float or None
-          The new y minimum.  Defaults to None, which leaves the ymin
-          unchanged.
-
-        ymax : float or None
-          The new y maximum.  Defaults to None, which leaves the ymax
-          unchanged.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = yt.ParticlePlot(ds.all_data(), 'particle_position_x', 'particle_velocity_x')
-        >>> pp.set_ylim(1e1, 1e8)
-        >>> pp.save()
-
-        """
-        self.y_lim = (ymin, ymax)
-        return self
-
-    def _get_axis_log(self):
-
-        xf, = self.data_source._determine_fields([self.x_field])
-        xfi = self.data_source.ds._get_field_info(*xf)
-        if self.x_log is None:
-            x_log = xfi.take_log
-        else:
-            x_log = self.x_log
-
-        yf, = self.data_source._determine_fields([self.y_field])
-        yfi = self.data_source.ds._get_field_info(*yf)
-        if self.y_log is None:
-            y_log = yfi.take_log
-        else:
-            y_log = self.y_log
-        
-        scales = {True: 'log', False: 'linear'}
-        return scales[x_log], scales[y_log]
-
-    def _get_field_label(self, field, field_info, field_unit):
-        field_unit = field_unit.latex_representation()
-        field_name = field_info.display_name
-        if isinstance(field, tuple): field = field[1]
-        if field_name is None:
-            field_name = r'$\rm{'+field+r'}$'
-            field_name = r'$\rm{'+field.replace('_','\/').title()+r'}$'
-        elif field_name.find('$') == -1:
-            field_name = field_name.replace(' ','\/')
-            field_name = r'$\rm{'+field_name+r'}$'
-        if field_unit is None or field_unit == '' or field_unit == '1':
-            label = field_name
-        else:
-            label = field_name+r'$\/\/('+field_unit+r')$'
-        return label
-
-    def _get_axis_titles(self):
-
-        xfi = self.data_source.ds._get_field_info(self.x_field)
-        x_unit = Unit(self.x_data.units, registry=self.data_source.ds.unit_registry)
-        x_title = self._get_field_label(self.x_field, xfi, x_unit)
-
-        yfi = self.data_source.ds._get_field_info(self.y_field)
-        y_unit = Unit(self.y_data.units, registry=self.data_source.ds.unit_registry)
-        y_title = self._get_field_label(self.y_field, yfi, y_unit)
-
-        return (x_title, y_title)


https://bitbucket.org/yt_analysis/yt/commits/aa8a1f105a52/
Changeset:   aa8a1f105a52
Branch:      yt
User:        atmyers
Date:        2014-09-03 04:01:32+00:00
Summary:     merging
Affected #:  68 files

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -186,9 +186,20 @@
     this_f = getattr(frontends_module, frontend)
     field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
     dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
+
     if frontend == "sph":
         field_info_names = \
           ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    elif frontend == "boxlib":
+        field_info_names = []
+        for d in dataset_names:
+            if "Maestro" in d:  
+                field_info_names.append("MaestroFieldInfo")
+            elif "Castro" in d: 
+                field_info_names.append("CastroFieldInfo")
+            else: 
+                field_info_names.append("BoxlibFieldInfo")
+
     for dset_name, fi_name in zip(dataset_names, field_info_names):
         fi = getattr(this_f, fi_name)
         nfields = 0

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -500,13 +500,28 @@
     fi
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
+    BUILD_ARGS=""
+    case $LIB in
+        *h5py*)
+            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            ;;
+        *numpy*)
+            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            then
+                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                                                 import numpy; print SV(numpy.__version__) < SV("1.8.0")')
+                if [ $VER == "True" ]
+                then
+                    echo "Removing previous NumPy instance (see issue #889)"
+                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                fi
+            fi
+            ;;
+        *)
+            ;;
+    esac
     cd $LIB
-    if [ ! -z `echo $LIB | grep h5py` ]
-    then
-	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    else
-        ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    fi
+    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
@@ -580,56 +595,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +666,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -729,7 +741,7 @@
         cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
-		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -932,7 +944,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY
@@ -1026,7 +1037,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/bootcamp/"
+    echo "    http://yt-project.org/doc/quickstart/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -37,7 +37,7 @@
 .. note::
 
    The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
+   http://yt-project.org/data.  See :ref:`quickstart-introduction` for more
    details.
 
 Let us know if you would like to contribute other example notebooks, or have

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/1)_Introduction.ipynb
--- a/doc/source/bootcamp/1)_Introduction.ipynb
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:39620670ce7751b23f30d2123fd3598de1c7843331f65de13e29f4ae9f759e0f"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Welcome to the yt bootcamp!\n",
-      "\n",
-      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
-      "\n",
-      "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook.  The documentation exists at http://yt-project.org/doc/.  If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.\n",
-      "\n",
-      "## Acquiring the datasets for this tutorial\n",
-      "\n",
-      "If you are executing these tutorials interactively, you need some sample datasets on which to run the code.  You can download these datasets at http://yt-project.org/data/.  The datasets necessary for each lesson are noted next to the corresponding tutorial.\n",
-      "\n",
-      "## What's Next?\n",
-      "\n",
-      "The Notebooks are meant to be explored in this order:\n",
-      "\n",
-      "1. Introduction\n",
-      "2. Data Inspection (IsolatedGalaxy dataset)\n",
-      "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n",
-      "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n",
-      "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
-      "6. Volume Rendering (IsolatedGalaxy dataset)"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time, so please wait while the kernel is busy. You will need to set `download_datasets` to True before using it."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "download_datasets = False\n",
-      "if download_datasets:\n",
-      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
-      "    print \"Got enzo_tiny_cosmology\"\n",
-      "    !tar xf enzo_tiny_cosmology.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
-      "    print \"Got Enzo_64\"\n",
-      "    !tar xf Enzo_64.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
-      "    print \"Got IsolatedGalaxy\"\n",
-      "    !tar xf IsolatedGalaxy.tar\n",
-      "    \n",
-      "    print \"All done!\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ /dev/null
@@ -1,384 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a8fe78715c1f3900c37c675d84320fe65f0ba8734abba60fd12e74d957e5d8ee"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Starting Out and Loading Data\n",
-      "\n",
-      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now that we've loaded yt, we can load up some data.  Let's load the `IsolatedGalaxy` dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Fields and Facts\n",
-      "\n",
-      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"index\" it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.print_stats()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also tell you the fields it found on disk:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, all of the fields it thinks it knows how to generate:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.derived_field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also transparently generate fields.  However, we encourage you to examine exactly what yt is doing when it generates those fields.  To see, you can ask for the source of a given field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.field_info[\"gas\", \"vorticity_x\"].get_source()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt stores information about the domain of the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also convert this into various units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width.in_units(\"kpc\")\n",
-      "print ds.domain_width.in_units(\"au\")\n",
-      "print ds.domain_width.in_units(\"mile\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Mesh Structure\n",
-      "\n",
-      "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grid_left_edge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The index (`ds.index` here) has an attribute `grids` which is all of the grid objects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grids[1]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g = ds.index.grids[1]\n",
-      "print g"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Grids have dimensions, extents, level, and even a list of Child grids."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.ActiveDimensions"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.LeftEdge, g.RightEdge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Level"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Children"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Advanced Grid Inspection\n",
-      "\n",
-      "If we want to examine grids only at a given level, we can!  Not only that, but we can load data and take a look at various fields.\n",
-      "\n",
-      "*This section can be skipped!*"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "gs = ds.index.select_grids(ds.index.max_level)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g2 = gs[0]\n",
-      "print g2\n",
-      "print g2.Parent\n",
-      "print g2.get_global_startindex()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print g2[\"density\"][:,:,0]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print (g2.Parent.child_mask == 0).sum() * 8\n",
-      "print g2.ActiveDimensions.prod()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "for f in ds.field_list:\n",
-      "    fv = g[f]\n",
-      "    if fv.size == 0: continue\n",
-      "    print f, fv.min(), fv.max()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Examining Data in Regions\n",
-      "\n",
-      "yt provides data object selectors.  In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it.  yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n",
-      "\n",
-      "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10, 'kpc'))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can calculate a bunch of bulk quantities.  Here's that list, but there's a list in the docs, too!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Let's look at the total mass.  This is how you call a given quantity.  yt calls these \"Derived Quantities\".  We'll talk about a few in a later notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.total_mass()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ /dev/null
@@ -1,275 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Simple Visualizations of Data\n",
-      "\n",
-      "Just like in our first notebook, we have to load yt and then some data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "For this notebook, we'll load up a cosmology dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "print \"Redshift =\", ds.current_redshift"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the terms that yt uses, a projection is a line integral through the domain.  This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned.  Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you.  However, we also provide a simple method of creating Projections and plotting them in a single step.  This is called a Plot Window, here specifically known as a `ProjectionPlot`.  One thing to note is that in yt, we project all the way through the entire domain at a single time.  This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n",
-      "\n",
-      "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly.  The cookbook in the documentation includes detailed examples of this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be prepended to the filename and can be used to name it based on the width or to supply a location.\n",
-      "\n",
-      "Now we'll zoom and pan a bit."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(2.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((0.1, 0.0))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((-0.25, -0.5))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(0.1)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we specify multiple fields, each time we call `show` we get multiple plots back.  Same for `save`!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the colormap on a field-by-field basis."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.set_cmap(\"temperature\", \"hot\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, we can re-center the plot on different locations.  One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "v, c = ds.find_max(\"density\")\n",
-      "p.set_center((c[0], c[1]))\n",
-      "p.zoom(10)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
-      "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
-      "s.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the logging of various fields:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.set_log(\"velocity_magnitude\", True)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides many different annotations for your plots.  You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here.  We'll annotate with velocity arrows."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.annotate_velocity()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Contours can also be overlaid:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
-      "s.annotate_contour(\"temperature\")\n",
-      "s.zoom(2.5)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, we can save out to the file system."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.save()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ /dev/null
@@ -1,382 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a46e1baa90d32045c2b524100f28bad41b3665249612c9a275ee0375a6f4be20"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Data Objects and Time Series Data\n",
-      "\n",
-      "Just like before, we will load up yt.  Since we'll be using pylab to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from matplotlib import pylab\n",
-      "from yt.analysis_modules.halo_finding.api import HaloFinder"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Time Series Data\n",
-      "\n",
-      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `DatasetSeries` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
-      "\n",
-      "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ts = yt.DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 1: Simple Time Series\n",
-      "\n",
-      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for ds in ts` where `ds` means \"Dataset\" and `ts` is the \"Time Series\" we just loaded up.  For each dataset, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the `extrema` Derived Quantity, and append the min and max to our extrema outputs."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "rho_ex = []\n",
-      "times = []\n",
-      "for ds in ts:\n",
-      "    dd = ds.all_data()\n",
-      "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
-      "rho_ex = np.array(rho_ex)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the minimum and the maximum:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n",
-      "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n",
-      "pylab.ylabel(\"Density ($g/cm^3$)\")\n",
-      "pylab.xlabel(\"Time (Gyr)\")\n",
-      "pylab.legend()\n",
-      "pylab.ylim(1e-32, 1e-21)\n",
-      "pylab.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 2: Advanced Time Series\n",
-      "\n",
-      "Let's do something a bit different.  Let's calculate the total mass inside halos and outside halos.\n",
-      "\n",
-      "This actually touches a lot of different pieces of machinery in yt.  For every dataset, we will run the halo finder HOP.  Then, we calculate the total mass in the domain.  Then, for each halo, we calculate the sum of the baryon mass in that halo.  We'll keep running tallies of these two things."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units import Msun\n",
-      "\n",
-      "mass = []\n",
-      "zs = []\n",
-      "for ds in ts:\n",
-      "    halos = HaloFinder(ds)\n",
-      "    dd = ds.all_data()\n",
-      "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    total_in_baryons = 0.0*Msun\n",
-      "    for halo in halos:\n",
-      "        sp = halo.get_sphere()\n",
-      "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    mass.append(total_in_baryons/total_mass)\n",
-      "    zs.append(ds.current_redshift)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now let's plot them!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogx(zs, mass, '-xb')\n",
-      "pylab.xlabel(\"Redshift\")\n",
-      "pylab.ylabel(\"Mass in halos / Total mass\")\n",
-      "pylab.xlim(max(zs), min(zs))\n",
-      "pylab.ylim(-0.01, .18)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Data Objects\n",
-      "\n",
-      "Time series data have many applications, but most of them rely on examining the underlying data in some way.  Below, we'll see how to use and manipulate data objects.\n",
-      "\n",
-      "### Ray Queries\n",
-      "\n",
-      "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine its quantities.  Rays have the special fields `t` and `dts`, which correspond to the parametric position at which the ray enters a given cell and the path length it travels through that cell.\n",
-      "\n",
-      "To create a ray, we specify the start and end points.\n",
-      "\n",
-      "Note that we need to convert these arrays to numpy arrays due to a bug in matplotlib 1.3.1."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
-      "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"dts\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"t\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"x\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Slice Queries\n",
-      "\n",
-      "While slices are often used for visualization, they can be useful for other operations as well.  yt regards slices as multi-resolution objects: a slice is an array of cells that are not all the same size, because it returns only the cells at the highest resolution that it intersects.  (This is true for all yt data objects.)  Slices and projections have the special fields `px`, `py`, `pdx`, and `pdy`, which correspond to the coordinates and half-widths in the pixel plane."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "v, c = ds.find_max(\"density\")\n",
-      "sl = ds.slice(0, c[0])\n",
-      "print sl[\"index\", \"x\"]\n",
-      "print sl[\"index\", \"z\"]\n",
-      "print sl[\"pdx\"]\n",
-      "print sl[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to do something interesting with a `Slice`, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "frb = sl.to_frb((50.0, 'kpc'), 1024)\n",
-      "print frb[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of `density`, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but when it is useful, you can manipulate the data directly."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "yt.write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
-      "from IPython.display import Image\n",
-      "Image(filename = \"temp.png\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Off-Axis Slices\n",
-      "\n",
-      "yt provides not only slices, but off-axis slices that are sometimes called \"cutting planes.\"  These are specified by (in order) a normal vector and a center.  Here we've set the normal vector to `[0.2, 0.3, 0.5]` and the center to be the point of maximum density.\n",
-      "\n",
-      "We can then turn these directly into plot windows using `to_pw`.  Note that the `to_pw` and `to_frb` methods are available on slices, off-axis slices, and projections alike."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
-      "pw = cp.to_pw(fields = [(\"gas\", \"density\")])"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Once we have our plot window from our cutting plane, we can show it here."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pw.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can, as noted above, do the same with our slice:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pws = sl.to_pw(fields=[\"density\"])\n",
-      "#pws.show()\n",
-      "print pws.plots.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Covering Grids\n",
-      "\n",
-      "If we want to access a 3D array of data that spans multiple resolutions in our simulation, we can use a covering grid.  This will return a 3D array of data, drawing from up to the resolution level specified when creating the data.  For example, if you create a covering grid that spans two child grids of a single parent grid, it will fill those zones covered by a zone of a child grid with the data from that child grid.  Where it is covered only by the parent grid, the cells from the parent grid will be duplicated (appropriately) to fill the covering grid.\n",
-      "\n",
-      "There are two different types of covering grids: unsmoothed and smoothed.  Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc.  This will help to reduce edge effects.  Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n",
-      "\n",
-      "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2.  We can then ask for the Density field, which will be a 3D array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cg = ds.covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print cg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In this example, we do exactly the same thing, except we ask for a *smoothed* covering grid, which will reduce edge effects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "scg = ds.smoothed_covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print scg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file
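
The notebook deleted above mentions that a `DatasetSeries` can be iterated
over in parallel but leaves that outside its scope.  A minimal sketch of the
pattern, assuming mpi4py and an MPI launcher are available (run under
mpirun); the storage dict gathers per-dataset results across ranks:

    import yt
    yt.enable_parallelism()  # harmless no-op in a serial session

    ts = yt.DatasetSeries("enzo_tiny_cosmology/*/*.hierarchy")

    storage = {}
    for sto, ds in ts.piter(storage=storage):
        # each rank handles a subset of the outputs
        sto.result_id = float(ds.current_time.in_units("Gyr"))
        sto.result = ds.all_data().quantities.extrema("density")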

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ /dev/null
@@ -1,254 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Derived Fields and Profiles\n",
-      "\n",
-      "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk.  This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used.  Additionally, you can create them by just writing Python functions."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from yt import derived_field\n",
-      "from matplotlib import pylab"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Derived Fields\n",
-      "\n",
-      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined as shown in the next cell.  What this does is create a function that accepts two arguments and then provides the units for that field.  In this case, our field is `dinosaurs` and our units are `K*cm/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "@derived_field(name = \"dinosaurs\", units = \"K * cm/s\")\n",
-      "def _dinos(field, data):\n",
-      "    return data[\"temperature\"] * data[\"velocity_magnitude\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One important thing to note is that derived fields must be defined *before* any datasets are loaded.  Let's load up our data and take a look at some quantities."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "dd = ds.all_data()\n",
-      "print dd.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can answer that by examining the `extrema` quantity -- the exact same way that we would for density, temperature, and so on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.extrema(\"dinosaurs\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can do the same for the average quantities as well."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.weighted_average_quantity(\"dinosaurs\", weight=\"temperature\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## A Few Other Quantities\n",
-      "\n",
-      "We can ask other quantities of our data, as well.  For instance, this sequence of operations will find the densest point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema.  All of this is done in a memory-conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions, and then a final reduction to calculate your quantity."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10.0, 'kpc'))\n",
-      "bv = sp.quantities.bulk_velocity()\n",
-      "L = sp.quantities.angular_momentum_vector()\n",
-      "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
-      "print bv\n",
-      "print L\n",
-      "print rho_min, rho_max"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Profiles\n",
-      "\n",
-      "yt provides the ability to bin in 1, 2, and 3 dimensions.  This means discretizing in one or more dimensions of phase space (density, temperature, etc.) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n",
-      "\n",
-      "We do this using the objects `Profile1D`, `Profile2D`, and `Profile3D`.  The first two are the most common since they are the easiest to visualize.\n",
-      "\n",
-      "This first set of commands manually creates a profile object from the sphere we created earlier, binned into 32 density bins between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `temperature` and the (previously-defined) `dinosaurs`.  We then plot it on log-log axes."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
-      "prof.add_fields([\"temperature\",\"dinosaurs\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Temperature $(K)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the `dinosaurs` field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Dinosaurs $(K cm / s)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to see the total mass in every bin, we profile the `cell_mass` field with no weight.  Specifying `weight_field=None` will simply sum the values that fall in each bin."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
-      "prof.add_fields([\"cell_mass\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Cell mass $(M_\\odot)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class.  Let's redo the last plot using `ProfilePlot`."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n",
-      "prof.set_unit('cell_mass', 'Msun')\n",
-      "prof.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Field Parameters\n",
-      "\n",
-      "Field parameters are a method of passing information to derived fields.  For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation.  yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off.  Here we show how that works:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp_small = ds.sphere(\"max\", (50.0, 'kpc'))\n",
-      "bv = sp_small.quantities.bulk_velocity()\n",
-      "\n",
-      "sp = ds.sphere(\"max\", (0.1, 'Mpc'))\n",
-      "rv1 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "sp.clear_data()\n",
-      "sp.set_field_parameter(\"bulk_velocity\", bv)\n",
-      "rv2 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "print bv\n",
-      "print rv1\n",
-      "print rv2"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
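
The notebook deleted above sets the `bulk_velocity` field parameter on a
sphere before recomputing radial velocities.  Derived fields can consume such
parameters directly; a minimal sketch (the field name here is hypothetical,
not part of yt):

    from yt import derived_field

    @derived_field(name="velocity_x_minus_bulk", units="cm/s")
    def _velocity_x_minus_bulk(field, data):
        vx = data["velocity_x"]
        # subtract the bulk velocity only if the caller supplied one
        if data.has_field_parameter("bulk_velocity"):
            vx = vx - data.get_field_parameter("bulk_velocity")[0]
        return vx

After `sp.set_field_parameter("bulk_velocity", bv)`, a query for
`sp["velocity_x_minus_bulk"]` would pick the parameter up.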

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/6)_Volume_Rendering.ipynb
--- a/doc/source/bootcamp/6)_Volume_Rendering.ipynb
+++ /dev/null
@@ -1,96 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:2a24bbe82955f9d948b39cbd1b1302968ff57f62f73afb2c7a5c4953393d00ae"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# A Brief Demo of Volume Rendering\n",
-      "\n",
-      "This shows a small amount of volume rendering.  Really, just enough to get your feet wet!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To create a volume rendering, we need a camera and a transfer function.  We'll use the `ColorTransferFunction`, which accepts (in log space) the minimum and maximum bounds of our transfer function.  This means behavior for data outside these values is undefined.\n",
-      "\n",
-      "We then add on \"layers\" like an onion.  This function accepts a width (specified here) in data units, and also a color map.  Here we add on four layers.\n",
-      "\n",
-      "Finally, we create a camera.  The focal point is `[0.5, 0.5, 0.5]`, the width is 20 kpc (including front-to-back integration) and we specify a transfer function.  Once we've done that, we call `show` to actually cast our rays and display them inline."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -24))\n",
-      "tf.add_layers(4, w=0.01)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
-      "cam.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to apply clipping, we can specify `clip_ratio`, which clips the upper bound of the image to this multiple of the standard deviation of the image values."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cam.show(clip_ratio=4)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "There are several other options we can specify.  Note that here we have turned on the use of ghost zones, shortened the data interval for the transfer function, and widened our Gaussian layers."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -25))\n",
-      "tf.add_layers(4, w=0.03)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
-      "cam.show(clip_ratio=4.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file
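
The notebook deleted above only displays renderings inline via `cam.show`.
The same camera can write an image straight to disk; a minimal sketch reusing
the notebook's setup (the output filename is illustrative):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    tf = yt.ColorTransferFunction((-28, -24))
    tf.add_layers(4, w=0.01)
    cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'),
                    512, tf, fields=["density"])
    cam.snapshot("volume_rendering.png", clip_ratio=4.0)  # render and save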

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/data_inspection.rst
--- a/doc/source/bootcamp/data_inspection.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _data_inspection:
-
-Data Inspection
----------------
-
-.. notebook:: 2)_Data_Inspection.ipynb

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/data_objects_and_time_series.rst
--- a/doc/source/bootcamp/data_objects_and_time_series.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Data Objects and Time Series
-----------------------------
-
-.. notebook:: 4)_Data_Objects_and_Time_Series.ipynb

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/derived_fields_and_profiles.rst
--- a/doc/source/bootcamp/derived_fields_and_profiles.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Derived Fields and Profiles
----------------------------
-
-.. notebook:: 5)_Derived_Fields_and_Profiles.ipynb

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/index.rst
--- a/doc/source/bootcamp/index.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. _bootcamp:
-
-yt Bootcamp
-===========
-
-The bootcamp is a series of worked examples of how to use much of the
-functionality of yt.  These are simple, short introductions to give you a taste
-of what the code can do and are not meant to be detailed walkthroughs.
-
-There are two ways in which you can go through the bootcamp: interactively and 
-non-interactively.  We recommend the interactive method, but if you're pressed
-for time, you can simply go through the linked pages below and view the
-worked examples.
-
-To execute the bootcamp interactively, you need to download the repository and
-start the IPython notebook.  If you do not already have the yt repository, the
-easiest way to get it is to clone it using Mercurial:
-
-.. code-block:: bash
-
-   hg clone https://bitbucket.org/yt_analysis/yt
-
-Now start the IPython notebook from within the repository:
-
-.. code-block:: bash
-
-   cd yt/doc/source/bootcamp
-   yt notebook
-
-This command will give you information about the notebook server and how to
-access it.  You will just pick a password (for security reasons) and then
-point your web browser at the notebook server.
-Once you have done so, choose "Introduction" from the list of
-notebooks; it includes an introduction and information about how to download
-the sample data.
-
-.. warning:: The pre-filled notebooks are *far* less fun than running them
-             yourself!  Check out the repo and give it a try.
-
-Here are the notebooks, which have been filled in for inspection:
-
-.. toctree::
-   :maxdepth: 1
-
-   introduction
-   data_inspection
-   simple_visualization
-   data_objects_and_time_series
-   derived_fields_and_profiles
-   volume_rendering
-
-.. note::
-
-   The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
-   details.
-
-Let us know if you would like to contribute other example notebooks, or have
-any suggestions for how these can be improved.

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/introduction.rst
--- a/doc/source/bootcamp/introduction.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _bootcamp-introduction:
-
-Introduction
-------------
-
-.. notebook:: 1)_Introduction.ipynb

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/simple_visualization.rst
--- a/doc/source/bootcamp/simple_visualization.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Simple Visualization
---------------------
-
-.. notebook:: 3)_Simple_Visualization.ipynb

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/bootcamp/volume_rendering.rst
--- a/doc/source/bootcamp/volume_rendering.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Volume Rendering
-----------------
-
-.. notebook:: 6)_Volume_Rendering.ipynb

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -122,7 +122,7 @@
     bootswatch_theme = "readable",
     navbar_links = [
         ("How to get help", "help/index"),
-        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Quickstart notebooks", "quickstart/index"),
         ("Cookbook", "cookbook/index"),
         ],
     navbar_sidebarrel = False,

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -90,3 +90,14 @@
 See :ref:`filtering-particles` for more information.
 
 .. yt_cookbook:: particle_filter_sfr.py
+
+Making a Turbulent Kinetic Energy Power Spectrum
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe shows how to use `yt` to read data and put it on a uniform
+grid to interface with the NumPy FFT routines and create a turbulent
+kinetic energy power spectrum.  (Note: the dataset used here is of low
+resolution, so the turbulence is not very well-developed.  The spike
+at high wavenumbers is due to non-periodicity in the z-direction).
+
+.. yt_cookbook:: power_spectrum_example.py

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/cookbook/custom_colorbar_tickmarks.rst
--- a/doc/source/cookbook/custom_colorbar_tickmarks.rst
+++ b/doc/source/cookbook/custom_colorbar_tickmarks.rst
@@ -1,4 +1,4 @@
-Custom Colorabar Tickmarks
---------------------------
+Custom Colorbar Tickmarks
+-------------------------
 
 .. notebook:: custom_colorbar_tickmarks.ipynb

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/cookbook/power_spectrum_example.py
--- /dev/null
+++ b/doc/source/cookbook/power_spectrum_example.py
@@ -0,0 +1,118 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import yt
+
+"""
+Make a turbulent KE power spectrum.  Since the flow is stratified, we apply
+a rho**(1/3) weighting to the velocity to get something that would
+look Kolmogorov (if the turbulence were fully developed).
+
+Ultimately, we aim to compute:
+
+                      1  ^      ^*                                           
+     E(k) = integral  -  V(k) . V(k) dS                                      
+                      2                                                      
+ 
+             n                                               ^               
+where V = rho  U is the density-weighted velocity field, and V is the
+FFT of V.
+ 
+(Note: sometimes we normalize by 1/volume to get a spectral
+energy density.)
+
+
+"""
+ 
+
+def doit(ds):
+
+    # an FFT operates on uniformly gridded data.  We'll use the yt
+    # covering grid for this.
+
+    max_level = ds.index.max_level
+
+    ref = int(np.product(ds.ref_factors[0:max_level]))
+
+    low = ds.domain_left_edge
+    dims = ds.domain_dimensions*ref
+
+    nx, ny, nz = dims
+
+    nindex_rho = 1./3.
+
+    Kk = np.zeros( (nx/2+1, ny/2+1, nz/2+1))
+
+    for vel in [("gas", "velocity_x"), ("gas", "velocity_y"), 
+                ("gas", "velocity_z")]:
+
+        Kk += 0.5*fft_comp(ds, ("gas", "density"), vel,
+                           nindex_rho, max_level, low, dims)
+
+    # wavenumbers
+    L = (ds.domain_right_edge - ds.domain_left_edge).d
+
+    kx = np.fft.rfftfreq(nx)*nx/L[0]
+    ky = np.fft.rfftfreq(ny)*ny/L[1]
+    kz = np.fft.rfftfreq(nz)*nz/L[2]
+    
+    # physical limits to the wavenumbers
+    kmin = np.min(1.0/L)
+    kmax = np.max(0.5*dims/L)
+    
+    kbins = np.arange(kmin, kmax, kmin)
+    N = len(kbins)
+
+    # bin the Fourier KE into radial kbins
+    kx3d, ky3d, kz3d = np.meshgrid(kx, ky, kz, indexing="ij")
+    k = np.sqrt(kx3d**2 + ky3d**2 + kz3d**2)
+
+    whichbin = np.digitize(k.flat, kbins)
+    ncount = np.bincount(whichbin)
+    
+    E_spectrum = np.zeros(len(ncount)-1)
+
+    for n in range(1,len(ncount)):
+        E_spectrum[n-1] = np.sum(Kk.flat[whichbin==n])
+
+    k = 0.5*(kbins[0:N-1] + kbins[1:N])
+    E_spectrum = E_spectrum[1:N]
+
+    index = np.argmax(E_spectrum)
+    kmax = k[index]
+    Emax = E_spectrum[index]
+
+    plt.loglog(k, E_spectrum)
+    plt.loglog(k, Emax*(k/kmax)**(-5./3.), ls=":", color="0.5")
+
+    plt.xlabel(r"$k$")
+    plt.ylabel(r"$E(k)dk$")
+
+    plt.savefig("spectrum.png")
+
+
+def fft_comp(ds, irho, iu, nindex_rho, level, low, delta ):
+
+    cube = ds.covering_grid(level, left_edge=low,
+                            dims=delta,
+                            fields=[irho, iu])
+
+    rho = cube[irho].d
+    u = cube[iu].d
+
+    nx, ny, nz = rho.shape
+
+    # do the FFTs -- note that since our data is real, there will be
+    # too much information here.  fftn puts the positive freq terms in
+    # the first half of the axes -- that's what we keep.  Our
+    # normalization has an '8' to account for this clipping to one
+    # octant.
+    ru = np.fft.fftn(rho**nindex_rho * u)[0:nx/2+1,0:ny/2+1,0:nz/2+1]
+    ru = 8.0*ru/(nx*ny*nz)
+
+    return np.abs(ru)**2
+
+
+if __name__ == "__main__":
+
+    ds = yt.load("maestro_xrb_lores_23437")
+    doit(ds)
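
A quick check of the wavenumber convention used in the recipe above: for N
cells on a domain of length L, `np.fft.rfftfreq(N)*N/L` yields the wavenumbers
0, 1/L, ..., N/(2L) in cycles per unit length, which is why the radial bins
run from `kmin = 1/L` to `kmax = 0.5*N/L`.  A small illustration on a unit
domain:

    import numpy as np

    N = 8
    print(np.fft.rfftfreq(N) * N)  # [ 0.  1.  2.  3.  4.]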

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -28,7 +28,7 @@
 * Analyzing
 * Examining
 * Cookbook
-* Bootcamp
+* Quickstart
 * Developing
 * Reference
 * Help

diff -r b0658872803e108385ece9c90d9715673f1b562f -r aa8a1f105a524391301a33342a3fa01918c7da44 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -170,10 +170,16 @@
 Developing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-If you plan to develop yt on Windows, we recommend using the `MinGW
+If you plan to develop yt on Windows, it is necessary to use the `MinGW
 <http://www.mingw.org/>`_ gcc compiler that can be installed using the `Anaconda
-Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the
-syntax for the setup command is slightly different; you must type:
+Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. The libpython package must be
+installed from Anaconda as well. These can both be installed with a single command:
+
+.. code-block:: bash
+
+  $ conda install libpython mingw
+
+Additionally, the syntax for the setup command is slightly different; you must type:
 
 .. code-block:: bash
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/f3b537866cc3/
Changeset:   f3b537866cc3
Branch:      yt
User:        atmyers
Date:        2014-09-03 04:03:12+00:00
Summary:     pf -> ds
Affected #:  1 file

diff -r aa8a1f105a524391301a33342a3fa01918c7da44 -r f3b537866cc348b9fdcac3f8ca20fa73da520e46 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -45,10 +45,10 @@
 _zp_fields = ("rhs", "phi", "gravitational_field_x",
               "gravitational_field_y")
 zp = "ZeldovichPancake/plt32.2d.hdf5"
-@requires_pf(zp)
+@requires_ds(zp)
 def test_zp():
-    pf = data_dir_load(zp)
-    yield assert_equal, str(pf), "plt32.2d.hdf5"
+    ds = data_dir_load(zp)
+    yield assert_equal, str(ds), "plt32.2d.hdf5"
     for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
         test_zp.__name__ = test.description
         yield test


https://bitbucket.org/yt_analysis/yt/commits/5143abe3de80/
Changeset:   5143abe3de80
Branch:      yt
User:        atmyers
Date:        2014-09-03 17:48:07+00:00
Summary:     removing charm frontend, which does not work in 3.0 anyway. Charm datasets should now use the generic Chombo frontend.
Affected #:  8 files
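
With the dedicated frontend removed, opening a Charm output should fall
through yt's frontend detection to the Chombo machinery instead.  A hedged
sketch (the filename is hypothetical):

    import yt

    # _is_valid on the Chombo dataset classes decides which frontend claims the file
    ds = yt.load("charm_run_0010.hdf5")
    print(type(ds))  # expected: a Chombo-family dataset class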

diff -r f3b537866cc348b9fdcac3f8ca20fa73da520e46 -r 5143abe3de800217bb9151d53d6e66cf68f0df6c yt/frontends/charm/api.py
--- a/yt/frontends/charm/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.charm
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CharmGrid, \
-      CharmHierarchy, \
-      CharmStaticOutput
-
-from .fields import \
-      CharmFieldInfo, \
-      add_charm_field
-
-from .io import \
-      IOHandlerCharmHDF5

diff -r f3b537866cc348b9fdcac3f8ca20fa73da520e46 -r 5143abe3de800217bb9151d53d6e66cf68f0df6c yt/frontends/charm/data_structures.py
--- a/yt/frontends/charm/data_structures.py
+++ /dev/null
@@ -1,341 +0,0 @@
-"""
-Data structures for Charm.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import re
-import os
-import weakref
-import numpy as np
-
-from collections import \
-     defaultdict
-from string import \
-     strip, \
-     rstrip
-from stat import \
-     ST_CTIME
-
-from .definitions import \
-     charm2enzoDict, \
-     yt2charmFieldsDict, \
-     parameterDict \
-
-from yt.funcs import *
-from yt.data_objects.grid_patch import \
-     AMRGridPatch
-from yt.data_objects.hierarchy import \
-     AMRHierarchy
-from yt.data_objects.static_output import \
-     StaticOutput
-from yt.utilities.definitions import \
-     mpc_conversion, sec_conversion
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_root_only
-from yt.utilities.io_handler import \
-    io_registry
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-from .fields import \
-    CharmFieldInfo, Charm2DFieldInfo, Charm1DFieldInfo, \
-    add_charm_field, add_charm_2d_field, add_charm_1d_field, \
-    KnownCharmFields
-
-class CharmGrid(AMRGridPatch):
-    _id_offset = 0
-    __slots__ = ["_level_id", "stop_index"]
-    def __init__(self, id, hierarchy, level, start, stop):
-        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
-                              hierarchy = hierarchy)
-        self.Parent = []
-        self.Children = []
-        self.Level = level
-        self.ActiveDimensions = stop - start + 1
-
-    def get_global_startindex(self):
-        """
-        Return the integer starting index for each dimension at the current
-        level.
-
-        """
-        if self.start_index != None:
-            return self.start_index
-        if self.Parent == []:
-            iLE = self.LeftEdge - self.pf.domain_left_edge
-            start_index = iLE / self.dds
-            return np.rint(start_index).astype('int64').ravel()
-        pdx = self.Parent[0].dds
-        start_index = (self.Parent[0].get_global_startindex()) + \
-            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
-        return self.start_index
-
-    def _setup_dx(self):
-        # has already been read in and stored in hierarchy
-        self.dds = self.hierarchy.dds_list[self.Level]
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-class CharmHierarchy(AMRHierarchy):
-
-    grid = CharmGrid
-    _data_file = None
-
-    def __init__(self,pf,data_style='charm_hdf5'):
-        self.domain_left_edge = pf.domain_left_edge
-        self.domain_right_edge = pf.domain_right_edge
-        self.data_style = data_style
-
-        if pf.dimensionality == 1:
-            self.data_style = "charm1d_hdf5"
-        if pf.dimensionality == 2:
-            self.data_style = "charm2d_hdf5"
-
-        self.field_indexes = {}
-        self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
-        self.hierarchy_filename = os.path.abspath(
-            self.parameter_file.parameter_filename)
-        self.directory = pf.fullpath
-        self._handle = pf._handle
-
-        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
-        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
-        AMRHierarchy.__init__(self,pf,data_style)
-        self._read_particles()
-
-    def _read_particles(self):
-        
-        self.num_particles = 0
-        particles_per_grid = []
-        for key, val in self._handle.items():
-            if key.startswith('level'):
-                level_particles = val['particles:offsets'][:]
-                self.num_particles += level_particles.sum()
-                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
-
-        for i, grid in enumerate(self.grids):
-            self.grids[i].NumberOfParticles = particles_per_grid[i]
-            self.grid_particle_count[i] = particles_per_grid[i]
-
-        assert(self.num_particles == self.grid_particle_count.sum())
-
-    def _detect_fields(self):
-        self.field_list = []
-        for key, val in self._handle.attrs.items():
-            if key.startswith("component"):
-                self.field_list.append(val)
-          
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        AMRHierarchy._setup_classes(self, dd)
-        self.object_types.sort()
-
-    def _count_grids(self):
-        self.num_grids = 0
-        for lev in self._levels:
-            self.num_grids += self._handle[lev]['Processors'].len()
-
-    def _parse_hierarchy(self):
-        f = self._handle # shortcut
-
-        grids = []
-        self.dds_list = []
-        i = 0
-        D = self.parameter_file.dimensionality
-        for lev_index, lev in enumerate(self._levels):
-            level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            try:
-                boxes = f[lev]['boxes'].value
-            except KeyError:
-                boxes = f[lev]['particles:boxes'].value
-            dx = f[lev].attrs['dx']
-            self.dds_list.append(dx * np.ones(3))
-
-            if D == 1:
-                self.dds_list[lev_index][1] = 1.0
-                self.dds_list[lev_index][2] = 1.0
-
-            if D == 2:
-                self.dds_list[lev_index][2] = 1.0
-
-            for level_id, box in enumerate(boxes):
-                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
-                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
-                
-                if D == 1:
-                    si = np.concatenate((si, [0.0, 0.0]))
-                    ei = np.concatenate((ei, [0.0, 0.0]))
-
-                if D == 2:
-                    si = np.concatenate((si, [0.0]))
-                    ei = np.concatenate((ei, [0.0]))
-
-                pg = self.grid(len(grids),self,level=level_number,
-                               start = si, stop = ei)
-                grids.append(pg)
-                grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
-                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
-                self.grid_particle_count[i] = 0
-                self.grid_dimensions[i] = ei - si + 1
-                i += 1
-        self.grids = np.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
-
-    def _populate_grid_objects(self):
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-
-        for g in self.grids:
-            g.Children = self._get_grid_children(g)
-            for g1 in g.Children:
-                g1.Parent.append(g)
-        self.max_level = self.grid_levels.max()
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
-
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
-class CharmStaticOutput(StaticOutput):
-    _hierarchy_class = CharmHierarchy
-    _fieldinfo_fallback = CharmFieldInfo
-    _fieldinfo_known = KnownCharmFields
-
-    def __init__(self, filename, data_style='charm_hdf5',
-                 storage_filename = None, ini_filename = None):
-        self._handle = h5py.File(filename,'r')
-        self.current_time = self._handle['level_0'].attrs['time']
-        self.ini_filename = ini_filename
-        self.fullplotdir = os.path.abspath(filename)
-        StaticOutput.__init__(self,filename,data_style)
-        self.storage_filename = storage_filename
-        self.cosmological_simulation = False
-
-        # These are parameters that I very much wish to get rid of.
-        self.parameters["HydroMethod"] = 'charm' # always PPM DE
-        self.parameters["DualEnergyFormalism"] = 0 
-        self.parameters["EOSType"] = -1 # default
-
-    def __del__(self):
-        self._handle.close()
-
-    def _set_units(self):
-        """
-        Generates the conversion to various physical _units based on the parameter file
-        """
-        self.units = {}
-        self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self._setup_nounits_units()
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        seconds = 1 #self["Time"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2charmFieldsDict:
-            self.conversion_factors[key] = 1.0
-
-    def _setup_nounits_units(self):
-        z = 0
-        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
-        if not self.has_key("TimeUnits"):
-            mylog.warning("No time units.  Setting 1.0 = 1 second.")
-            self.conversion_factors["Time"] = 1.0
-        for unit in mpc_conversion.keys():
-            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
-
-    def _parse_parameter_file(self):
-        
-        self.unique_identifier = \
-                               int(os.stat(self.parameter_filename)[ST_CTIME])
-        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
-        self.domain_left_edge = self.__calc_left_edge()
-        self.domain_right_edge = self.__calc_right_edge()
-        self.domain_dimensions = self.__calc_domain_dimensions()
-
-        if self.dimensionality == 1:
-            self._fieldinfo_fallback = Charm1DFieldInfo
-            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
-            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
-            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
-
-        if self.dimensionality == 2:
-            self._fieldinfo_fallback = Charm2DFieldInfo
-            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
-            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
-            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
-        
-        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True,) * self.dimensionality
-
-    def __calc_left_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        D = self.dimensionality
-        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
-        return LE
-
-    def __calc_right_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        D = self.dimensionality
-        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
-        return RE
-
-    def __calc_domain_dimensions(self):
-        fileh = self._handle
-        D = self.dimensionality
-        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
-        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
-        return R_index - L_index
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            valid = "Charm_global" in fileh["/"]
-            fileh.close()
-            return valid
-        except:
-            pass
-        return False
-
-    @parallel_root_only
-    def print_key_parameters(self):
-        for a in ["current_time", "domain_dimensions", "domain_left_edge",
-                  "domain_right_edge"]:
-            if not hasattr(self, a):
-                mylog.error("Missing %s in parameter file definition!", a)
-                continue
-            v = getattr(self, a)
-            mylog.info("Parameters: %-25s = %s", a, v)

diff -r f3b537866cc348b9fdcac3f8ca20fa73da520e46 -r 5143abe3de800217bb9151d53d6e66cf68f0df6c yt/frontends/charm/definitions.py
--- a/yt/frontends/charm/definitions.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-Various definitions for various other modules and routines
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                                 }
-
-charm2enzoDict = {"GAMMA": "Gamma",
-                  "Ref_ratio": "RefineBy"
-                                    }
-
-yt2charmFieldsDict = {}
-charm2ytFieldsDict = {}
-

diff -r f3b537866cc348b9fdcac3f8ca20fa73da520e46 -r 5143abe3de800217bb9151d53d6e66cf68f0df6c yt/frontends/charm/fields.py
--- a/yt/frontends/charm/fields.py
+++ /dev/null
@@ -1,150 +0,0 @@
-"""
-Charm-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    NullFunc, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-import numpy as np
-
-CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = CharmFieldInfo.add_field
-
-KnownCharmFields = FieldInfoContainer()
-add_charm_field = KnownCharmFields.add_field
-
-add_charm_field("potential", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("potential")],
-                units=r"")
-
-add_charm_field("density", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("density")],
-                units=r"")
-
-add_charm_field("gravitational_field_x", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_x")],
-                units=r"")
-
-add_charm_field("gravitational_field_y", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_y")],
-                units=r"")
-
-add_charm_field("gravitational_field_z", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_z")],
-                units=r"")
-
-def _Density(field, data):
-    return data["density"]
-add_field("Density",function=_Density, take_log=True,
-          units=r'\rm{g}/\rm{cm^3}')
-
-def particle_func(p_field, dtype='float64'):
-    def _Particles(field, data):
-        io = data.hierarchy.io
-        if not data.NumberOfParticles > 0:
-            return np.array([], dtype=dtype)
-        else:
-            return io._read_particles(data, p_field).astype(dtype)
-        
-    return _Particles
-
-_particle_field_list = ["mass",
-                        "position_x",
-                        "position_y",
-                        "position_z",
-                        "velocity_x",
-                        "velocity_y",
-                        "velocity_z",
-                        "acceleration_x",
-                        "acceleration_y",
-                        "acceleration_z"]
-
-for pf in _particle_field_list:
-    pfunc = particle_func("%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
-
-def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64')
-    return particles
-
-def _ParticleMassMsun(field, data):
-    particles = data["particle_mass"].astype('float64')
-    return particles/1.989e33
-
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True)
-add_field("ParticleMassMsun",
-          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
-          particle_type=True)
-
-#do overrides for 2D
-
-Charm2DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
-add_charm_2d_field = Charm2DFieldInfo.add_field
-
-def _gravitational_field_z(field, data):
-    return np.zeros(data['gravitational_field_x'].shape,
-                    dtype='float64')
-add_charm_2d_field("gravitational_field_z", function=_gravitational_field_z)
-
-def _particle_position_z(field, data):
-    return np.zeros(data['particle_position_x'].shape, dtype='float64')
-add_charm_2d_field("particle_position_z", function=_particle_position_z)
-
-def _particle_velocity_z(field, data):
-    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
-add_charm_2d_field("particle_velocity_z", function=_particle_velocity_z)
-
-def _particle_acceleration_z(field, data):
-    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
-add_charm_2d_field("particle_acceleration_z", function=_particle_acceleration_z)
-
-#do overrides for 1D
-
-Charm1DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
-add_charm_1d_field = Charm1DFieldInfo.add_field
-
-def _gravitational_field_y(field, data):
-    return np.zeros(data['gravitational_field_y'].shape,
-                    dtype='float64')
-
-def _particle_position_y(field, data):
-    return np.zeros(data['particle_position_x'].shape, dtype='float64')
-
-def _particle_velocity_y(field, data):
-    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
-
-def _particle_acceleration_y(field, data):
-    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
-
-add_charm_1d_field("gravitational_field_z", function=_gravitational_field_z)
-add_charm_1d_field("gravitational_field_y", function=_gravitational_field_y)
-
-add_charm_1d_field("particle_position_z", function=_particle_position_z)
-add_charm_1d_field("particle_velocity_z", function=_particle_velocity_z)
-add_charm_1d_field("particle_acceleration_z", function=_particle_acceleration_z)
-
-add_charm_1d_field("particle_position_y", function=_particle_position_y)
-add_charm_1d_field("particle_velocity_y", function=_particle_velocity_y)
-add_charm_1d_field("particle_acceleration_y", function=_particle_acceleration_y)
\ No newline at end of file

diff -r f3b537866cc348b9fdcac3f8ca20fa73da520e46 -r 5143abe3de800217bb9151d53d6e66cf68f0df6c yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""
-The data-file handling functions
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import h5py
-import os
-import re
-import numpy as np
-
-from yt.utilities.io_handler import \
-           BaseIOHandler
-
-class IOHandlerCharmHDF5(BaseIOHandler):
-    _data_style = "charm_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'position_y': 1,
-                                      'position_z': 2,
-                                      'velocity_x': 3,
-                                      'velocity_y': 4,
-                                      'velocity_z': 5,
-                                      'acceleration_x': 6,
-                                      'acceleration_y': 7,
-                                      'acceleration_z': 8,
-                                      'mass': 9}
-
-    _field_dict = None
-    @property
-    def field_dict(self):
-        if self._field_dict is not None:
-            return self._field_dict
-        field_dict = {}
-        for key, val in self._handle.attrs.items():
-            if key.startswith('component_'):
-                comp_number = int(re.match('component_(\d)', key).groups()[0])
-                field_dict[val] = comp_number
-        self._field_dict = field_dict
-        return self._field_dict
-        
-    def _read_field_names(self, grid):
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
-    
-    def _read_data(self,grid,field):
-
-        lstring = 'level_%i' % grid.Level
-        lev = self._handle[lstring]
-        dims = grid.ActiveDimensions
-        boxsize = dims.prod()
-        
-        grid_offset = lev[self._offset_string][grid._level_id]
-        start = grid_offset+self.field_dict[field]*boxsize
-        stop = start + boxsize
-        data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
-
-    def _read_particles(self, grid, name):
-
-        field_index = self._particle_field_index[name]
-        lev = 'level_%s' % grid.Level
-
-        particles_per_grid = self._handle[lev]['particles:offsets'].value
-        items_per_particle = len(self._particle_field_index)
-
-        # compute global offset position
-        offsets = items_per_particle * np.cumsum(particles_per_grid)
-        offsets = np.append(np.array([0]), offsets)
-        offsets = np.array(offsets, dtype=np.int64)
-
-        # convert between the global grid id and the id on this level            
-        grid_levels = np.array([g.Level for g in self.pf.h.grids])
-        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
-        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
-        lo = grid.id - grid_level_offset
-        hi = lo + 1
-
-        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-        return data[field_index::items_per_particle]
-
-class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
-    _data_style = "charm2d_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'position_y': 1,
-                                      'velocity_x': 2,
-                                      'velocity_y': 3,
-                                      'acceleration_x': 4,
-                                      'acceleration_y': 5,
-                                      'mass': 6}
-
-
-class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
-    _data_style = "charm1d_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'velocity_x': 1,
-                                      'acceleration_x': 2,
-                                      'mass': 3}
\ No newline at end of file

diff -r f3b537866cc348b9fdcac3f8ca20fa73da520e46 -r 5143abe3de800217bb9151d53d6e66cf68f0df6c yt/frontends/charm/setup.py
--- a/yt/frontends/charm/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('charm', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r f3b537866cc348b9fdcac3f8ca20fa73da520e46 -r 5143abe3de800217bb9151d53d6e66cf68f0df6c yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -26,8 +26,6 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")
-    config.add_subpackage("pluto")
-    config.add_subpackage("charm")
     config.add_subpackage("flash/tests")
     config.add_subpackage("enzo/tests")
     config.add_subpackage("stream/tests")


https://bitbucket.org/yt_analysis/yt/commits/3f8b7beb0e71/
Changeset:   3f8b7beb0e71
Branch:      yt
User:        atmyers
Date:        2014-09-03 21:42:55+00:00
Summary:     adding a comment and moving determine_periodic up to the base class
Affected #:  2 files

diff -r 5143abe3de800217bb9151d53d6e66cf68f0df6c -r 3f8b7beb0e7108d6e7858139322fada629d3242b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -317,7 +317,14 @@
         self._determine_periodic()
 
     def _determine_periodic(self):
-        self.periodicity = (True, True, True)
+        # we default to true unless the HDF5 file says otherwise
+        is_periodic = np.array([True, True, True])
+        for dir in range(self.dimensionality):
+            try:
+                is_periodic[dir] = self._handle['/level_0'].attrs['is_periodic_%d' % dir]
+            except KeyError:
+                is_periodic[dir] = True
+        self.periodicity = tuple(is_periodic)
 
     def _calc_left_edge(self):
         fileh = self._handle
@@ -509,15 +516,6 @@
         if self.dimensionality == 2:
             self._field_info_class = ChomboPICFieldInfo2D
 
-    def _determine_periodic(self):
-        is_periodic = np.array([True, True, True])
-        for dir in [0, 1, 2]:
-            try:
-                is_periodic[dir] = self._handle['/level_0'].attrs['is_periodic_%d' % dir]
-            except KeyError:
-                is_periodic[dir] = True
-        self.periodicity = tuple(is_periodic)
-
     @classmethod
     def _is_valid(self, *args, **kwargs):
 

diff -r 5143abe3de800217bb9151d53d6e66cf68f0df6c -r 3f8b7beb0e7108d6e7858139322fada629d3242b yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -112,8 +112,11 @@
         ("particle_velocity_z", ("code_length / code_time", [], None)),
     )
 
+    # I am re-implementing this here to over-ride a few of the default behaviors:
+    # I don't want to skip output units for code_length and I want particle_fields
+    # to default to take_log = False. 
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
-        skip_output_units = () #("code_length",)
+        skip_output_units = ()
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):
             units = self.ds.field_units.get((ptype, f), units)
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
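
For reference, the relocated _determine_periodic amounts to an attribute
lookup with a periodic fallback. A self-contained sketch that writes
hypothetical 'is_periodic_%d' attributes into a scratch HDF5 file and reads
them back the same way (assumes h5py; the file name is made up):

import h5py
import numpy as np

with h5py.File("scratch.h5", "w") as f:      # hypothetical file
    lev = f.create_group("level_0")
    lev.attrs['is_periodic_0'] = 1           # periodic in x
    lev.attrs['is_periodic_1'] = 0           # non-periodic in y
    # no z attribute: the reader falls back to periodic

with h5py.File("scratch.h5", "r") as f:
    is_periodic = np.array([True, True, True])
    for d in range(3):
        try:
            is_periodic[d] = f['/level_0'].attrs['is_periodic_%d' % d]
        except KeyError:
            is_periodic[d] = True
    print(tuple(is_periodic))                # (True, False, True)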


https://bitbucket.org/yt_analysis/yt/commits/41462c461d6e/
Changeset:   41462c461d6e
Branch:      yt
User:        atmyers
Date:        2014-09-03 21:50:27+00:00
Summary:     set the number of ghost cells to zero in each direction if there is no information in the file
Affected #:  1 file

diff -r 3f8b7beb0e7108d6e7858139322fada629d3242b -r 41462c461d6ecc34e4e8516bc430cd3f30f7c895 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -41,7 +41,7 @@
             self.ghost = np.array(self.ghost)
         except KeyError:
             # assume zero ghosts if outputGhosts not present
-            self.ghost = np.array(self.dim)
+            self.ghost = np.zeros(self.dim)
 
     _field_dict = None
     @property
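
The one-line fix above matters because np.array(self.dim) wraps the
dimension count itself, while np.zeros(self.dim) builds a zero-filled array
of that length:

import numpy as np

dim = 3
np.array(dim)    # -> array(3): a 0-d array holding the value 3, so the
                 # old fallback effectively claimed nonzero ghost cells
np.zeros(dim)    # -> array([ 0.,  0.,  0.]): zero ghosts on each axis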


https://bitbucket.org/yt_analysis/yt/commits/090bd3e39069/
Changeset:   090bd3e39069
Branch:      yt
User:        atmyers
Date:        2014-09-03 22:08:04+00:00
Summary:     making changes suggested by Nathan and Chris
Affected #:  2 files

diff -r 41462c461d6ecc34e4e8516bc430cd3f30f7c895 -r 090bd3e390699d40bffb90ac7547f0e2fa7230a7 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -24,7 +24,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
+| Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Enzo                  |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r 41462c461d6ecc34e4e8516bc430cd3f30f7c895 -r 090bd3e390699d40bffb90ac7547f0e2fa7230a7 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -256,7 +256,7 @@
             self.dataset_type = 'chombo_hdf5'
 
         # some datasets will not be time-dependent, and to make
-        # make matters worse the simulation time is not always
+        # matters worse, the simulation time is not always
         # stored in the same place in the hdf file! Make
         # sure we handle that here.
         try:


https://bitbucket.org/yt_analysis/yt/commits/7a6f538529f2/
Changeset:   7a6f538529f2
Branch:      yt
User:        astrugarek
Date:        2014-09-04 20:30:34+00:00
Summary:     First version supporting Pluto files, in 2D or 3D, in Cartesian geometry
Affected #:  4 files

diff -r 627128ec2f64d997bebfa5b40da835a977af4814 -r 7a6f538529f2a5a3d40d77b42ec6c2fb75b3dc59 yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -18,10 +18,14 @@
       ChomboHierarchy, \
       ChomboDataset, \
       Orion2Hierarchy, \
-      Orion2Dataset
+      Orion2Dataset,\
+      PlutoHierarchy, \
+      PlutoDataset
 
 from .fields import \
-      ChomboFieldInfo
+      ChomboFieldInfo,\
+      PlutoFieldInfo
 
 from .io import \
-      IOHandlerChomboHDF5
+      IOHandlerChomboHDF5,\
+      IOHandlerPlutoHDF5

diff -r 627128ec2f64d997bebfa5b40da835a977af4814 -r 7a6f538529f2a5a3d40d77b42ec6c2fb75b3dc59 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -42,7 +42,7 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo, Orion2FieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo, PlutoFieldInfo
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -341,7 +341,7 @@
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
             orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
 
-        if not (pluto_ini_file_exists and orion2_ini_file_exists):
+        if not (pluto_ini_file_exists or orion2_ini_file_exists):
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
@@ -363,6 +363,153 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
+class PlutoHierarchy(ChomboHierarchy):
+
+    def __init__(self, ds, dataset_type="pluto_chombo_native"):
+        ChomboHierarchy.__init__(self, ds, dataset_type)
+
+    def _parse_index(self):
+        f = self._handle # shortcut
+        self.max_level = f.attrs['num_levels'] - 1
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.dataset.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_levels[i] = level_number
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)+self.domain_left_edge.value
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)+self.domain_left_edge.value
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+
+class PlutoDataset(ChomboDataset):
+
+    _index_class = PlutoHierarchy
+    _field_info_class = PlutoFieldInfo
+
+    def __init__(self, filename, dataset_type='pluto_chombo_native',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
+
+    def _parse_parameter_file(self):
+        """
+        Check to see whether a 'pluto.ini' file
+        exists in the plot file directory. If one does, attempt to parse it.
+        Otherwise grab the dimensions from the hdf5 file.
+        """
+
+        pluto_ini_file_exists = False
+        dir_name = os.path.dirname(os.path.abspath(self.fullplotdir))
+        pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+        pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_dimensions = self._calc_domain_dimensions()
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+
+        if pluto_ini_file_exists:
+            lines=[line.strip() for line in open(pluto_ini_filename)]
+            self.domain_left_edge = np.zeros(self.dimensionality)
+            self.domain_right_edge = np.zeros(self.dimensionality)
+            for il,ll in enumerate(lines[lines.index('[Grid]')+2:lines.index('[Grid]')+2+self.dimensionality]):
+                self.domain_left_edge[il] = float(ll.split()[2])
+                self.domain_right_edge[il] = float(ll.split()[-1])
+            self.periodicity = [0]*3
+            for il,ll in enumerate(lines[lines.index('[Boundary]')+2:lines.index('[Boundary]')+2+6:2]):
+                self.periodicity[il] = (ll.split()[1] == 'periodic')
+            self.periodicity=tuple(self.periodicity)
+            for il,ll in enumerate(lines[lines.index('[Parameters]')+2:]):
+                if (ll.split()[0] == 'GAMMA'):
+                    self.gamma = float(ll.split()[1])
+        else:
+            self.domain_left_edge = self._calc_left_edge()
+            self.domain_right_edge = self._calc_right_edge()
+            self.periodicity = (True, True, True)
+
+        # if a lower-dimensional dataset, set up pseudo-3D stuff here.
+        if self.dimensionality == 1:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+        
+        if orion2_ini_file_exists:
+            return True
+
+        if pluto_ini_file_exists:
+            return True
+
+        if not (pluto_ini_file_exists and orion2_ini_file_exists):
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Chombo_global" in fileh["/"]
+                valid = 'CeilVA_mass' in fileh.attrs.keys()
+                fileh.close()
+                return valid
+            except:
+                pass
+        return False
+
+
+
 class Orion2Hierarchy(ChomboHierarchy):
 
     def __init__(self, ds, dataset_type="orion_chombo_native"):

diff -r 627128ec2f64d997bebfa5b40da835a977af4814 -r 7a6f538529f2a5a3d40d77b42ec6c2fb75b3dc59 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -27,6 +27,8 @@
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
+vel_units = "code_length / code_time"
+b_units = "code_magnetic"
 
 # Chombo does not have any known fields by itself.
 class ChomboFieldInfo(FieldInfoContainer):
@@ -88,3 +90,17 @@
                        units = "erg/cm**3")
         self.add_field("temperature", function=_temperature,
                        units="K")
+
+class PlutoFieldInfo(ChomboFieldInfo):
+    known_other_fields = (
+        ("rho", (rho_units, ["density"], None)),
+        ("prs", ("code_mass / (code_length * code_time**2)", ["pressure"], None)),
+        ("vx1", (vel_units, ["velocity_x"], None)),
+        ("vx2", (vel_units, ["velocity_y"], None)),
+        ("vx3", (vel_units, ["velocity_z"], None)),
+        ("bx1", (b_units, ["magnetic_field_x"], None)),
+        ("bx2", (b_units, ["magnetic_field_y"], None)),
+        ("bx3", (b_units, ["magnetic_field_z"], None)),
+    )
+
+    known_particle_fields = ()

diff -r 627128ec2f64d997bebfa5b40da835a977af4814 -r 7a6f538529f2a5a3d40d77b42ec6c2fb75b3dc59 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -179,6 +179,16 @@
         self.ds = ds
         self._handle = ds._handle   
 
+class IOHandlerPlutoHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "pluto_chombo_native"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, ds, *args, **kwargs):
+        BaseIOHandler.__init__(self, ds, *args, **kwargs)
+        self.ds = ds
+        self._handle = ds._handle
+
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"
 


https://bitbucket.org/yt_analysis/yt/commits/3f1c8d56ba44/
Changeset:   3f1c8d56ba44
Branch:      yt
User:        astrugarek
Date:        2014-09-08 11:12:27+00:00
Summary:     Adding test
Affected #:  1 file

diff -r 7a6f538529f2a5a3d40d77b42ec6c2fb75b3dc59 -r 3f1c8d56ba44cd75530e3dc5d5d4e72b31fa9e38 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -52,3 +52,15 @@
     for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
         test_tb.__name__ = test.description
         yield test
+
+_khp_fields = ("density", "velocity_magnitude", #"velocity_divergence",
+           "magnetic_field_x")
+
+kho = "KelvinHelmholtz/data.0004.hdf5"
+@requires_ds(kho)
+def test_kho():
+    ds = data_dir_load(kho)
+    yield assert_equal, str(ds), "data.0004.hdf5"
+    for test in small_patch_amr(kho, _fields):
+        test_gc.__name__ = test.description
+        yield test
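
As committed, the test body still references _fields and test_gc, which look
like leftovers from the recipe it was copied from; using the module's own
imports, a corrected version would presumably read (hypothetical fix, not
part of this changeset):

@requires_ds(kho)
def test_kho():
    ds = data_dir_load(kho)
    yield assert_equal, str(ds), "data.0004.hdf5"
    for test in small_patch_amr(kho, _khp_fields):
        test_kho.__name__ = test.description
        yield test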


https://bitbucket.org/yt_analysis/yt/commits/97646fd4e02f/
Changeset:   97646fd4e02f
Branch:      yt
User:        astrugarek
Date:        2014-09-09 20:29:56+00:00
Summary:     data_structures.py edited online with Bitbucket.
Update after A. Myers' comment
Affected #:  1 file

diff -r 3f1c8d56ba44cd75530e3dc5d5d4e72b31fa9e38 -r 97646fd4e02fb110ad4b7d7db20260bf488b8c4a yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -482,34 +482,17 @@
     def _is_valid(self, *args, **kwargs):
 
         pluto_ini_file_exists  = False
-        orion2_ini_file_exists = False
 
         if type(args[0]) == type(""):
             dir_name = os.path.dirname(os.path.abspath(args[0]))
             pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
-            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
-            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
-        
-        if orion2_ini_file_exists:
-            return True
 
         if pluto_ini_file_exists:
             return True
 
-        if not (pluto_ini_file_exists and orion2_ini_file_exists):
-            try:
-                fileh = h5py.File(args[0],'r')
-                valid = "Chombo_global" in fileh["/"]
-                valid = 'CeilVA_mass' in fileh.attrs.keys()
-                fileh.close()
-                return valid
-            except:
-                pass
         return False
 
-
-
 class Orion2Hierarchy(ChomboHierarchy):
 
     def __init__(self, ds, dataset_type="orion_chombo_native"):


https://bitbucket.org/yt_analysis/yt/commits/ef973e094f37/
Changeset:   ef973e094f37
Branch:      yt
User:        atmyers
Date:        2014-09-16 20:43:46+00:00
Summary:     merging in Antoine's Pluto changesets and fixing conflicts
Affected #:  5 files

diff -r 090bd3e390699d40bffb90ac7547f0e2fa7230a7 -r ef973e094f370f968a23ca6e12faa8b0d10a3ebd yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -14,20 +14,24 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-      ChomboGrid, \
-      ChomboHierarchy, \
-      ChomboDataset, \
-      Orion2Hierarchy, \
-      Orion2Dataset, \
-      ChomboPICHierarchy, \
-      ChomboPICDataset
+    ChomboGrid, \
+    ChomboHierarchy, \
+    ChomboDataset, \
+    Orion2Hierarchy, \
+    Orion2Dataset, \
+    ChomboPICHierarchy, \
+    ChomboPICDataset, \
+    PlutoHierarchy, \
+    PlutoDataset
 
 from .fields import \
-      ChomboFieldInfo, \
-      Orion2FieldInfo, \
-      ChomboPICFieldInfo1D, \
-      ChomboPICFieldInfo2D, \
-      ChomboPICFieldInfo3D
+    ChomboFieldInfo, \
+    Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, \
+    ChomboPICFieldInfo2D, \
+    ChomboPICFieldInfo3D, \
+    PlutoFieldInfo
 
 from .io import \
-      IOHandlerChomboHDF5
+    IOHandlerChomboHDF5,\
+    IOHandlerPlutoHDF5

diff -r 090bd3e390699d40bffb90ac7547f0e2fa7230a7 -r ef973e094f370f968a23ca6e12faa8b0d10a3ebd yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -43,7 +43,8 @@
     io_registry
 
 from .fields import ChomboFieldInfo, Orion2FieldInfo, \
-    ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D 
+    ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D, \
+    PlutoFieldInfo
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -124,7 +125,7 @@
         # only do anything if the dataset contains particles
         if not any([f[1].startswith('particle_') for f in self.field_list]):
             return
-        
+
         self.num_particles = 0
         particles_per_grid = []
         for key, val in self._handle.items():
@@ -190,7 +191,7 @@
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
-                
+
                 if D == 1:
                     si = np.concatenate((si, [0.0, 0.0]))
                     ei = np.concatenate((ei, [0.0, 0.0]))
@@ -294,7 +295,7 @@
         return f
 
     def _parse_parameter_file(self):
-        
+
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
         self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
@@ -312,7 +313,7 @@
             self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
-        
+
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self._determine_periodic()
 
@@ -350,17 +351,17 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
-        pluto_ini_file_exists  = False
+        pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
-        if type(args[0]) == type(""):
+        if isinstance(args[0], str):
             dir_name = os.path.dirname(os.path.abspath(args[0]))
             pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
             orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
 
-        if not (pluto_ini_file_exists and orion2_ini_file_exists):
+        if not (pluto_ini_file_exists or orion2_ini_file_exists):
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
@@ -383,6 +384,136 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
+class PlutoHierarchy(ChomboHierarchy):
+
+    def __init__(self, ds, dataset_type="pluto_chombo_native"):
+        ChomboHierarchy.__init__(self, ds, dataset_type)
+
+    def _parse_index(self):
+        f = self._handle # shortcut
+        self.max_level = f.attrs['num_levels'] - 1
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.dataset.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_levels[i] = level_number
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)+self.domain_left_edge.value
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)+self.domain_left_edge.value
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+
+class PlutoDataset(ChomboDataset):
+
+    _index_class = PlutoHierarchy
+    _field_info_class = PlutoFieldInfo
+
+    def __init__(self, filename, dataset_type='pluto_chombo_native',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
+
+    def _parse_parameter_file(self):
+        """
+        Check to see whether a 'pluto.ini' file
+        exists in the plot file directory. If one does, attempt to parse it.
+        Otherwise grab the dimensions from the hdf5 file.
+        """
+
+        pluto_ini_file_exists = False
+        dir_name = os.path.dirname(os.path.abspath(self.fullplotdir))
+        pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+        pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_dimensions = self._calc_domain_dimensions()
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+
+        if pluto_ini_file_exists:
+            lines=[line.strip() for line in open(pluto_ini_filename)]
+            self.domain_left_edge = np.zeros(self.dimensionality)
+            self.domain_right_edge = np.zeros(self.dimensionality)
+            for il,ll in enumerate(lines[lines.index('[Grid]')+2:lines.index('[Grid]')+2+self.dimensionality]):
+                self.domain_left_edge[il] = float(ll.split()[2])
+                self.domain_right_edge[il] = float(ll.split()[-1])
+            self.periodicity = [0]*3
+            for il,ll in enumerate(lines[lines.index('[Boundary]')+2:lines.index('[Boundary]')+2+6:2]):
+                self.periodicity[il] = (ll.split()[1] == 'periodic')
+            self.periodicity=tuple(self.periodicity)
+            for il,ll in enumerate(lines[lines.index('[Parameters]')+2:]):
+                if (ll.split()[0] == 'GAMMA'):
+                    self.gamma = float(ll.split()[1])
+        else:
+            self.domain_left_edge = self._calc_left_edge()
+            self.domain_right_edge = self._calc_right_edge()
+            self.periodicity = (True, True, True)
+
+        # if a lower-dimensional dataset, set up pseudo-3D stuff here.
+        if self.dimensionality == 1:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+
+        if pluto_ini_file_exists:
+            return True
+
+        return False
+
 class Orion2Hierarchy(ChomboHierarchy):
 
     def __init__(self, ds, dataset_type="orion_chombo_native"):
@@ -469,7 +600,7 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
-        pluto_ini_file_exists  = False
+        pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
         if type(args[0]) == type(""):
@@ -478,7 +609,7 @@
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
             orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
-        
+
         if orion2_ini_file_exists:
             return True
 
@@ -505,10 +636,10 @@
     _field_info_class = ChomboPICFieldInfo3D
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
-                 storage_filename = None, ini_filename = None):
+                 storage_filename=None, ini_filename=None):
 
-        ChomboDataset.__init__(self, filename, dataset_type, 
-                    storage_filename, ini_filename)
+        ChomboDataset.__init__(self, filename, dataset_type,
+                               storage_filename, ini_filename)
 
         if self.dimensionality == 1:
             self._field_info_class = ChomboPICFieldInfo1D
@@ -519,25 +650,27 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
-        pluto_ini_file_exists  = False
+        pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
-        if type(args[0]) == type(""):
+        if isinstance(args[0], str):
             dir_name = os.path.dirname(os.path.abspath(args[0]))
             pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
             orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
-        
+
         if orion2_ini_file_exists:
-            return True
+            return False
 
-        if not pluto_ini_file_exists:
-            try:
-                fileh = h5py.File(args[0],'r')
-                valid = "Charm_global" in fileh["/"]
-                fileh.close()
-                return valid
-            except:
-                pass
+        if pluto_ini_file_exists:
+            return False
+
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = "Charm_global" in fileh["/"]
+            fileh.close()
+            return valid
+        except:
+            pass
         return False

diff -r 090bd3e390699d40bffb90ac7547f0e2fa7230a7 -r ef973e094f370f968a23ca6e12faa8b0d10a3ebd yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -32,6 +32,8 @@
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
+vel_units = "code_length / code_time"
+b_units = "code_magnetic"
 
 # Chombo does not have any known fields by itself.
 class ChomboFieldInfo(FieldInfoContainer):
@@ -94,6 +96,7 @@
         self.add_field("temperature", function=_temperature,
                        units="K")
 
+
 class ChomboPICFieldInfo3D(FieldInfoContainer):
     known_other_fields = (
         ("density", (rho_units, ["density", "Density"], None)),
@@ -112,9 +115,9 @@
         ("particle_velocity_z", ("code_length / code_time", [], None)),
     )
 
-    # I am re-implementing this here to over-ride a few of the default behaviors:
-    # I don't want to skip output units for code_length and I want particle_fields
-    # to default to take_log = False. 
+    # I am re-implementing this here to override a few default behaviors:
+    # I don't want to skip output units for code_length and I do want
+    # particle_fields to default to take_log = False.
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
         skip_output_units = ()
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):
@@ -204,7 +207,7 @@
         for ftype in fluid_field_types:
             self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
                             units = "code_length / code_time**2")
-        
+
         for ptype in particle_field_types:                
             self.add_field((ptype, "particle_position_z"), function = _dummy_position,
                            particle_type = True,
@@ -228,7 +231,7 @@
 
     def __init__(self, pf, field_list):
         super(ChomboPICFieldInfo1D, self).__init__(pf, field_list)
-        
+
         for ftype in fluid_field_types:
             self.add_field((ftype, 'gravitational_field_y'), function = _dummy_field, 
                             units = "code_length / code_time**2")
@@ -249,3 +252,17 @@
             self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
                            particle_type = True,
                            units = "code_length / code_time")
+
+class PlutoFieldInfo(ChomboFieldInfo):
+    known_other_fields = (
+        ("rho", (rho_units, ["density"], None)),
+        ("prs", ("code_mass / (code_length * code_time**2)", ["pressure"], None)),
+        ("vx1", (vel_units, ["velocity_x"], None)),
+        ("vx2", (vel_units, ["velocity_y"], None)),
+        ("vx3", (vel_units, ["velocity_z"], None)),
+        ("bx1", (b_units, ["magnetic_field_x"], None)),
+        ("bx2", (b_units, ["magnetic_field_y"], None)),
+        ("bx3", (b_units, ["magnetic_field_z"], None)),
+    )
+
+    known_particle_fields = ()

diff -r 090bd3e390699d40bffb90ac7547f0e2fa7230a7 -r ef973e094f370f968a23ca6e12faa8b0d10a3ebd yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -197,6 +197,16 @@
         self._handle = pf._handle   
         self._read_ghost_info()
 
+class IOHandlerPlutoHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "pluto_chombo_native"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, ds, *args, **kwargs):
+        BaseIOHandler.__init__(self, ds, *args, **kwargs)
+        self.ds = ds
+        self._handle = ds._handle
+
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"
 

diff -r 090bd3e390699d40bffb90ac7547f0e2fa7230a7 -r ef973e094f370f968a23ca6e12faa8b0d10a3ebd yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -52,3 +52,15 @@
     for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
         test_tb.__name__ = test.description
         yield test
+
+_khp_fields = ("density", "velocity_magnitude", #"velocity_divergence",
+           "magnetic_field_x")
+
+kho = "KelvinHelmholtz/data.0004.hdf5"
+@requires_ds(kho)
+def test_kho():
+    ds = data_dir_load(kho)
+    yield assert_equal, str(ds), "data.0004.hdf5"
+    for test in small_patch_amr(kho, _fields):
+        test_gc.__name__ = test.description
+        yield test
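
Taken together, the merged _is_valid methods disambiguate the Chombo-family
frontends by ini files first and HDF5 markers second. A condensed sketch of
that decision order (hypothetical helper, not yt API):

import os
import h5py

def classify_chombo_family(path):
    dir_name = os.path.dirname(os.path.abspath(path))
    if os.path.isfile(os.path.join(dir_name, "orion2.ini")):
        return "orion2"
    if os.path.isfile(os.path.join(dir_name, "pluto.ini")):
        return "pluto"
    try:
        with h5py.File(path, 'r') as fileh:
            if "Charm_global" in fileh["/"]:
                return "chombo_pic"
            if "Chombo_global" in fileh["/"]:
                return "chombo"
    except (IOError, OSError):
        pass
    return None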


https://bitbucket.org/yt_analysis/yt/commits/e9872945258d/
Changeset:   e9872945258d
Branch:      yt
User:        atmyers
Date:        2014-09-16 20:50:14+00:00
Summary:     pf -> ds
Affected #:  3 files

diff -r ef973e094f370f968a23ca6e12faa8b0d10a3ebd -r e9872945258de356e8af363b77a3fd10e839082e yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -627,8 +627,8 @@
 
 class ChomboPICHierarchy(ChomboHierarchy):
 
-    def __init__(self, pf, dataset_type="chombo_hdf5"):
-        ChomboHierarchy.__init__(self, pf, dataset_type)
+    def __init__(self, ds, dataset_type="chombo_hdf5"):
+        ChomboHierarchy.__init__(self, ds, dataset_type)
 
 class ChomboPICDataset(ChomboDataset):
 

diff -r ef973e094f370f968a23ca6e12faa8b0d10a3ebd -r e9872945258de356e8af363b77a3fd10e839082e yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -201,8 +201,8 @@
         ("particle_velocity_y", ("code_length / code_time", [], None)),
     )
 
-    def __init__(self, pf, field_list):
-        super(ChomboPICFieldInfo2D, self).__init__(pf, field_list)
+    def __init__(self, ds, field_list):
+        super(ChomboPICFieldInfo2D, self).__init__(ds, field_list)
 
         for ftype in fluid_field_types:
             self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
@@ -229,8 +229,8 @@
         ("particle_velocity_x", ("code_length / code_time", [], None)),
     )
 
-    def __init__(self, pf, field_list):
-        super(ChomboPICFieldInfo1D, self).__init__(pf, field_list)
+    def __init__(self, ds, field_list):
+        super(ChomboPICFieldInfo1D, self).__init__(ds, field_list)
 
         for ftype in fluid_field_types:
             self.add_field((ftype, 'gravitational_field_y'), function = _dummy_field, 

diff -r ef973e094f370f968a23ca6e12faa8b0d10a3ebd -r e9872945258de356e8af363b77a3fd10e839082e yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -19,17 +19,17 @@
 from yt.utilities.logger import ytLogger as mylog
 
 from yt.utilities.io_handler import \
-           BaseIOHandler
+    BaseIOHandler
 
 class IOHandlerChomboHDF5(BaseIOHandler):
     _dataset_type = "chombo_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, pf, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
+    def __init__(self, ds, *args, **kwargs):
+        BaseIOHandler.__init__(self, ds, *args, **kwargs)
+        self.ds = ds
+        self._handle = ds._handle
         self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
         self._read_ghost_info()
 
@@ -178,10 +178,10 @@
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, pf, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
+    def __init__(self, ds, *args, **kwargs):
+        BaseIOHandler.__init__(self, ds, *args, **kwargs)
+        self.ds = ds
+        self._handle = ds._handle
         self.dim = 2
         self._read_ghost_info()
 
@@ -190,11 +190,11 @@
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, pf, *args, **kwargs)
-        self.pf = pf
+    def __init__(self, ds, *args, **kwargs):
+        BaseIOHandler.__init__(self, ds, *args, **kwargs)
+        self.ds = ds
         self.dim = 1
-        self._handle = pf._handle   
+        self._handle = ds._handle
         self._read_ghost_info()
 
 class IOHandlerPlutoHDF5(IOHandlerChomboHDF5):
@@ -213,7 +213,7 @@
     def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files
-             
+
         """
 
         fn = grid.ds.fullplotdir[:-4] + "sink"


https://bitbucket.org/yt_analysis/yt/commits/89bc18b61419/
Changeset:   89bc18b61419
Branch:      yt
User:        atmyers
Date:        2014-09-16 20:52:04+00:00
Summary:     removing hyphenated field names from the alias list
Affected #:  1 file

diff -r e9872945258de356e8af363b77a3fd10e839082e -r 89bc18b614194090c6a0caee3e64feeec74255c9 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -101,9 +101,9 @@
     known_other_fields = (
         ("density", (rho_units, ["density", "Density"], None)),
         ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
-        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
-        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
-        ("gravitational_field_z", ("code_length / code_time**2", ["gravitational-field-z"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_z", ("code_length / code_time**2", [], None)),
     )
     known_particle_fields = (
         ("particle_mass", ("code_mass", [], None)),
@@ -190,8 +190,8 @@
     known_other_fields = (
         ("density", (rho_units, ["density", "Density"], None)),
         ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
-        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
-        ("gravitational_field_y", ("code_length / code_time**2", ["gravitational-field-y"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", [], None)),
     )
     known_particle_fields = (
         ("particle_mass", ("code_mass", [], None)),
@@ -221,7 +221,7 @@
     known_other_fields = (
         ("density", (rho_units, ["density", "Density"], None)),
         ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
-        ("gravitational_field_x", ("code_length / code_time**2", ["gravitational-field-x"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
     )
     known_particle_fields = (
         ("particle_mass", ("code_mass", [], None)),


https://bitbucket.org/yt_analysis/yt/commits/02bb1d3ce8cb/
Changeset:   02bb1d3ce8cb
Branch:      yt
User:        atmyers
Date:        2014-09-16 21:44:32+00:00
Summary:     fixing a bug in the vector magnitude fields that only affected low-dimensional datasets
Affected #:  1 file

diff -r 89bc18b614194090c6a0caee3e64feeec74255c9 -r 02bb1d3ce8cb3601f32d64699e61794bb6a8f18e yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -43,18 +43,14 @@
                            ftype = "gas", slice_info = None,
                            validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _magnitude(field, data):
-        mag  = data[xn] * data[xn]
-        mag += data[yn] * data[yn]
-        mag += data[zn] * data[zn]
+        fn = field_components[0]
+        mag = data[fn] * data[fn]
+        for idim in np.arange(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            mag += data[fn] * data[fn]
         return np.sqrt(mag)
 
     registry.add_field((ftype, "%s_magnitude" % basename),
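
The rewritten _magnitude sums squares only over the components the dataset
actually carries, instead of substituting ("index", "zeros") fields for the
missing axes. A minimal numpy illustration of the same loop:

import numpy as np

def magnitude(components, dimensionality):
    # Accumulate x**2 (+ y**2 (+ z**2)) up to the dataset's
    # dimensionality, as the new field definition does.
    mag = components[0] * components[0]
    for idim in range(1, dimensionality):
        mag += components[idim] * components[idim]
    return np.sqrt(mag)

vx, vy = np.array([3.0]), np.array([4.0])
print(magnitude([vx, vy], 2))    # [ 5.] -- no z term ever enters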


https://bitbucket.org/yt_analysis/yt/commits/d47f280595fb/
Changeset:   d47f280595fb
Branch:      yt
User:        atmyers
Date:        2014-09-16 22:26:46+00:00
Summary:     making the same fix in create_squared_field, too
Affected #:  2 files

diff -r 02bb1d3ce8cb3601f32d64699e61794bb6a8f18e -r d47f280595fbd2d574116dcd57ce139373eb2895 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -61,18 +61,14 @@
                          ftype = "gas", slice_info = None,
                          validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _squared(field, data):
-        squared  = data[xn] * data[xn]
-        squared += data[yn] * data[yn]
-        squared += data[zn] * data[zn]
+        fn = field_components[0]
+        squared  = data[fn] * data[fn]
+        for idim in np.arange(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            squared += data[fn] * data[fn]
         return squared
 
     registry.add_field((ftype, "%s_squared" % basename),

diff -r 02bb1d3ce8cb3601f32d64699e61794bb6a8f18e -r d47f280595fbd2d574116dcd57ce139373eb2895 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -53,9 +53,6 @@
         test_tb.__name__ = test.description
         yield test
 
-_khp_fields = ("density", "velocity_magnitude", #"velocity_divergence",
-           "magnetic_field_x")
-
 kho = "KelvinHelmholtz/data.0004.hdf5"
 @requires_ds(kho)
 def test_kho():


https://bitbucket.org/yt_analysis/yt/commits/858828e7276b/
Changeset:   858828e7276b
Branch:      yt
User:        atmyers
Date:        2014-09-16 22:34:27+00:00
Summary:     merging from mainline
Affected #:  93 files

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -129,7 +129,14 @@
 are center_of_mass and bulk_velocity. Their definitions are available in 
 ``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
 your quantity may be of use to the general community, add it to 
-``halo_quantities.py`` and issue a pull request.
+``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
+
+* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
+* ``particle_mass`` -- Mass of halo
+* ``particle_position_x`` -- Location of halo
+* ``particle_position_y`` -- Location of halo
+* ``particle_position_z`` -- Location of halo
+* ``virial_radius`` -- Virial radius of halo
 
 An example of adding a quantity:
 

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -75,7 +75,8 @@
   mass. In simulations where the highest-resolution particles all have the 
   same mass (ie: zoom-in grid based simulations), one can set up a particle
   filter to select the lowest mass particles and perform the halo finding
-  only on those.
+  only on those.  See this cookbook recipe for an example:
+  :ref:`cookbook-rockstar-nested-grid`.
 
 To run the Rockstar Halo finding, you must launch python with MPI and 
 parallelization enabled. While Rockstar itself does not require MPI to run, 

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -263,7 +263,7 @@
 
    ds = load("my_data")
    sp = ds.sphere('c', (10, 'kpc'))
-   print ad.quantities.angular_momentum_vector()
+   print sp.quantities.angular_momentum_vector()
 
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -14,6 +14,22 @@
 
 .. yt_cookbook:: halo_plotting.py
 
+.. _cookbook-rockstar-nested-grid:
+
+Running Rockstar to Find Halos on Multi-Resolution-Particle Datasets
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The version of Rockstar installed with yt does not have the capability
+to work on datasets with particles of different masses.  Unfortunately,
+many simulations possess particles of different masses, notably cosmological 
+zoom datasets.  This recipe uses Rockstar in two different ways to generate a 
+HaloCatalog from the highest resolution dark matter particles (the ones 
+inside the zoom region).  It then overlays some of those halos on a projection
+as a demonstration.  See :ref:`halo-analysis` and :ref:`annotate-halos` for
+more information.
+
+.. yt_cookbook:: rockstar_nest.py
+
 .. _cookbook-halo_finding:
 
 Halo Profiling and Custom Analysis

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/cookbook/power_spectrum_example.py
--- a/doc/source/cookbook/power_spectrum_example.py
+++ b/doc/source/cookbook/power_spectrum_example.py
@@ -57,7 +57,7 @@
     
     # physical limits to the wavenumbers
     kmin = np.min(1.0/L)
-    kmax = np.max(0.5*dims/L)
+    kmax = np.min(0.5*dims/L)
     
     kbins = np.arange(kmin, kmax, kmin)
     N = len(kbins)
@@ -112,7 +112,6 @@
     return np.abs(ru)**2
 
 
-if __name__ == "__main__":
 
-    ds = yt.load("maestro_xrb_lores_23437")
-    doit(ds)
+ds = yt.load("maestro_xrb_lores_23437")
+doit(ds)
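
The kmax change above swaps np.max for np.min, presumably because a
spherically binned spectrum is only fully sampled out to the smallest
per-axis Nyquist wavenumber; on an anisotropic grid the two differ:

import numpy as np

dims = np.array([64, 64, 128])   # hypothetical grid shape
L = np.array([1.0, 1.0, 1.0])    # hypothetical box lengths
print(np.max(0.5 * dims / L))    # 64.0: bins past 32 are undersampled
print(np.min(0.5 * dims / L))    # 32.0: resolved in every direction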

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/cookbook/rockstar_nest.py
--- /dev/null
+++ b/doc/source/cookbook/rockstar_nest.py
@@ -0,0 +1,74 @@
+# You must run this job in parallel.  
+# There are several mpi flags which can be useful in order for it to work OK.
+# It requires at least 3 processors in order to run because of the way in which 
+# rockstar divides up the work.  Make sure you have mpi4py installed as per 
+# http://yt-project.org/docs/dev/analyzing/parallel_computation.html#setting-up-parallel-yt
+    
+# Usage: mpirun -np <num_procs> --mca btl ^openib python this_script.py
+
+import yt
+from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
+from yt.data_objects.particle_filters import add_particle_filter
+from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+yt.enable_parallelism() # rockstar halofinding requires parallelism
+
+# Create a dark matter particle filter
+# This will be code dependent, but the function below is correct for Enzo
+
+def DarkMatter(pfilter, data):
+    filter = data[("all", "particle_type")] == 1 # DM = 1, Stars = 2
+    return filter
+
+add_particle_filter("dark_matter", function=DarkMatter, filtered_type='all', \
+                    requires=["particle_type"])
+
+# First, we make sure that this script is being run using mpirun with
+# at least 3 processors as indicated in the comments above.
+assert(yt.communication_system.communicators[-1].size >= 3)
+
+# Load the dataset and apply dark matter filter
+fn = "Enzo_64/DD0043/data0043"
+ds = yt.load(fn)
+ds.add_particle_filter('dark_matter')
+
+# Determine highest resolution DM particle mass in sim by looking
+# at the extrema of the dark_matter particle_mass field.
+ad = ds.all_data()
+min_dm_mass = ad.quantities.extrema(('dark_matter','particle_mass'))[0]
+
+# Define a new particle filter to isolate all highest resolution DM particles
+# and apply it to dataset
+def MaxResDarkMatter(pfilter, data):
+    return data["particle_mass"] <= 1.01 * min_dm_mass
+
+add_particle_filter("max_res_dark_matter", function=MaxResDarkMatter, \
+                    filtered_type='dark_matter', requires=["particle_mass"])
+ds.add_particle_filter('max_res_dark_matter')
+
+# If desired, we can see the total number of DM and High-res DM particles
+#if yt.is_root():
+#    print "Simulation has %d DM particles." % ad['dark_matter','particle_type'].shape
+#    print "Simulation has %d Highest Res DM particles." % ad['max_res_dark_matter', 'particle_type'].shape
+
+# Run halo finding on the dataset, using only the highest resolution dark
+# matter particles
+hc = HaloCatalog(data_ds=ds, finder_method='rockstar', \
+                 finder_kwargs={'dm_only':True, 'particle_type':'max_res_dark_matter'})
+hc.create()
+
+# Or alternatively, just run the RockstarHaloFinder and later import the 
+# output file as necessary.  You can skip this step if you've already run it
+# once, but be careful since subsequent halo finds will overwrite this data.
+#rhf = RockstarHaloFinder(ds, particle_type="max_res_dark_matter")
+#rhf.run()
+# Load the halo list from a rockstar output for this dataset
+# Create a projection with the halos overplot on top
+#halos = yt.load('rockstar_halos/halos_0.0.bin')
+#hc = HaloCatalog(halos_ds=halos)
+#hc.load()
+
+# Regardless of your method of creating the halo catalog, use it to overplot the
+# halos on a projection.
+p = yt.ProjectionPlot(ds, "x", "density")
+p.annotate_halos(hc, annotate_field = 'particle_identifier', width=(10,'Mpc'), factor=2)
+p.save()

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/cookbook/tests/test_cookbook.py
--- /dev/null
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""Module for cookbook testing
+
+
+This test should be run from the main yt directory.
+
+Example:
+
+      $ sed -e '/where/d' -i nose.cfg setup.cfg
+      $ nosetests doc/source/cookbook/tests/test_cookbook.py -P -v
+"""
+import glob
+import os
+import sys
+
+sys.path.append(os.path.join(os.getcwd(), "doc/source/cookbook"))
+
+
+def test_recipe():
+    '''Test generator that yields a check for each cookbook recipe'''
+    for fname in glob.glob("doc/source/cookbook/*.py"):
+        module_name = os.path.splitext(os.path.basename(fname))[0]
+        yield check_recipe, module_name
+
+
+def check_recipe(module_name):
+    '''Run a single recipe'''
+    __import__(module_name)
+    assert True
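
For readers unfamiliar with the pattern, test_recipe is a nose test
generator: each yielded (callable, argument) pair runs as its own test case,
so one failing recipe does not abort the rest.  A minimal sketch of the same
idiom (names are illustrative):

    # nose runs each yielded (callable, args...) tuple as a separate test
    def test_values():
        for value in (1, 2, 3):
            yield check_positive, value

    def check_positive(value):
        assert value > 0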

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -4,7 +4,7 @@
 ds = yt.load("Enzo_64/DD0030/data0030")
 
 # Make a projection that is the full width of the domain,
-# but only 10 Mpc in depth.  This is done by creating a
+# but only 5 Mpc in depth.  This is done by creating a
 # region object with this exact geometry and providing it
 # as a data_source for the projection.
 
@@ -17,12 +17,12 @@
 right_corner = ds.domain_right_edge
 
 # Now adjust the size of the region along the line of sight (x axis).
-depth = ds.quan(10.0,'Mpc')
+depth = ds.quan(5.0,'Mpc')
 left_corner[0] = center[0] - 0.5 * depth
-left_corner[0] = center[0] + 0.5 * depth
+right_corner[0] = center[0] + 0.5 * depth
 
 # Create the region
-region = ds.region(center, left_corner, right_corner)
+region = ds.box(left_corner, right_corner)
 
 # Create a density projection and supply the region we have just created.
 # Only cells within the region will be included in the projection.
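
The final hunk above fixes a genuine API mismatch: ds.region(center,
left_edge, right_edge) takes the center as its first argument, so the old
call was passing corners into the wrong slots.  ds.box(left_edge,
right_edge) is a convenience wrapper that derives the center from the edges
itself, which is exactly what this slab needs:

    # equivalent, but box() computes the center for you
    region = ds.box(left_corner, right_corner)
    # region = ds.region(center, left_corner, right_corner)  # center required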

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Particle_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -74,7 +74,7 @@
       "import yt\n",
       "from yt.units import parsec, Msun\n",
       "\n",
-      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppy), max(ppy)]])\n",
+      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppz), max(ppz)]])\n",
       "\n",
       "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
      ],

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/examining/Loading_Spherical_Data.ipynb
--- /dev/null
+++ b/doc/source/examining/Loading_Spherical_Data.ipynb
@@ -0,0 +1,188 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:88ed88ce8d8f4a359052f287aea17a7cbed435ff960e195097b440191ce6c2ab"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Loading Spherical Data\n",
+      "\n",
+      "With version 3.0 of yt, it has gained the ability to load data from non-Cartesian systems.  This support is still being extended, but here is an example of how to load spherical data from a regularly-spaced grid.  For irregularly spaced grids, a similar setup can be used, but the `load_hexahedral_mesh` method will have to be used instead.\n",
+      "\n",
+      "Note that in yt, \"spherical\" means that it is ordered $r$, $\\theta$, $\\phi$, where $\\theta$ is the declination from the azimuth (running from $0$ to $\\pi$ and $\\phi$ is the angle around the zenith (running from $0$ to $2\\pi$).\n",
+      "\n",
+      "We first start out by loading yt."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import numpy as np\n",
+      "import yt"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we create a few derived fields.  The first three are just straight translations of the Cartesian coordinates, so that we can see where we are located in the data, and understand what we're seeing.  The final one is just a fun field that is some combination of the three coordinates, and will vary in all dimensions."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "@yt.derived_field(name = \"sphx\", units = \"cm\", take_log=False)\n",
+      "def sphx(field, data):\n",
+      "    return np.cos(data[\"phi\"]) * np.sin(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"sphy\", units = \"cm\", take_log=False)\n",
+      "def sphy(field, data):\n",
+      "    return np.sin(data[\"phi\"]) * np.sin(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"sphz\", units = \"cm\", take_log=False)\n",
+      "def sphz(field, data):\n",
+      "    return np.cos(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"funfield\", units=\"cm\", take_log=False)\n",
+      "def funfield(field, data):\n",
+      "    return (np.sin(data[\"phi\"])**2 + np.cos(data[\"theta\"])**2) * (1.0*data[\"r\"].uq+data[\"r\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Loading Data\n",
+      "\n",
+      "Now we can actually load our data.  We use the `load_uniform_grid` function here.  Normally, the first argument would be a dictionary of field data, where the keys were the field names and the values the field data arrays.  Here, we're just going to look at derived fields, so we supply an empty one.\n",
+      "\n",
+      "The next few arguments are the number of dimensions, the bounds, and we then specify the geometry as spherical."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load_uniform_grid({}, [128, 128, 128],\n",
+      "                          bbox=np.array([[0.0, 1.0], [0.0, np.pi], [0.0, 2*np.pi]]),\n",
+      "                          geometry=\"spherical\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Looking at Data\n",
+      "\n",
+      "Now we can take slices.  The first thing we will try is making a slice of data along the \"phi\" axis, here $\\pi/2$, which will be along the y axis in the positive direction.  We use the `.slice` attribute, which creates a slice, and then we convert this into a plot window.  Note that here 2 is used to indicate the third axis (0-indexed) which for spherical data is $\\phi$.\n",
+      "\n",
+      "This is the manual way of creating a plot -- below, we'll use the standard, automatic ways.  Note that the coordinates run from $-r$ to $r$ along the $z$ axis and from $0$ to $r$ along the $R$ axis.  We use the capital $R$ to indicate that it's the $R$ along the $x-y$ plane."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = ds.slice(2, np.pi/2)\n",
+      "p = s.to_pw(\"funfield\", origin=\"native\")\n",
+      "p.set_zlim(\"all\", 0.0, 4.0)\n",
+      "p.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also slice along $r$.  For now, this creates a regular grid with *incorrect* units for phi and theta.  We are currently exploring two other options -- a simple aitoff projection, and fixing it to use the correct units as-is."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"r\", \"funfield\")\n",
+      "s.set_zlim(\"all\", 0.0, 4.0)\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also slice at constant $\\theta$.  But, this is a weird thing!  We're slicing at a constant declination from the azimuth.  What this means is that when thought of in a Cartesian domain, this slice is actually a cone.  The axes have been labeled appropriately, to indicate that these are not exactly the $x$ and $y$ axes, but instead differ by a factor of $\\sin(\\theta))$."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"theta\", \"funfield\")\n",
+      "s.set_zlim(\"all\", 0.0, 4.0)\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We've seen lots of the `funfield` plots, but we can also look at the Cartesian axes.  This next plot plots the Cartesian $x$, $y$ and $z$ values on a $\\theta$ slice.  Because we're not supplying an argument to the `center` parameter, yt will place it at the center of the $\\theta$ axis, which will be at $\\pi/2$, where it will be aligned with the $x-y$ plane.  The slight change in `sphz` results from the cells themselves migrating, and plotting the center of those cells."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"theta\", [\"sphx\", \"sphy\", \"sphz\"])\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do the same with the $\\phi$ axis."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "s = yt.SlicePlot(ds, \"phi\", [\"sphx\", \"sphy\", \"sphz\"])\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
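
For reference, the sphx, sphy, and sphz fields defined in the notebook above
implement the standard spherical-to-Cartesian mapping, with theta the
colatitude and phi the azimuth:

    x = r * sin(theta) * cos(phi)
    y = r * sin(theta) * sin(phi)
    z = r * cos(theta)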

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/examining/index.rst
--- a/doc/source/examining/index.rst
+++ b/doc/source/examining/index.rst
@@ -9,4 +9,5 @@
    loading_data
    generic_array_data
    generic_particle_data
+   spherical_data
    low_level_inspection

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/examining/spherical_data.rst
--- /dev/null
+++ b/doc/source/examining/spherical_data.rst
@@ -0,0 +1,6 @@
+.. _loading-spherical-data:
+
+Loading Spherical Data
+======================
+
+.. notebook:: Loading_Spherical_Data.ipynb

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -151,19 +151,28 @@
 Overplot Halo Annotations
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_halos(self, halo_catalog, col='white', alpha=1, \
-                             width=None):
+.. function:: annotate_halos(self, halo_catalog, circle_kwargs=None, width=None, \
+                             annotate_field=None, font_kwargs=None, factor=1.0):
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
 
    Accepts a :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
-   and plots a circle at the location of each
-   halo with the radius of the circle corresponding to the virial radius of the
-   halo.  If ``width`` is set to None (default) all halos are plotted.
-   Otherwise, only halos that fall within a slab with width ``width`` centered
-   on the center of the plot data. The color and transparency of the circles can
-   be controlled with ``col`` and ``alpha`` respectively.
+   and plots a circle at the location of each halo with the radius of the 
+   circle corresponding to the virial radius of the halo.  If ``width`` is set 
+   to None (default) all halos are plotted; otherwise it accepts a tuple of
+   the form ``(1.0, 'Mpc')`` and only displays halos that fall within a slab
+   of width ``width`` centered on the center of the plot data.  The appearance
+   of the circles can be changed with the ``circle_kwargs`` dictionary, which
+   is supplied to the Matplotlib patch ``Circle``.  One can label each of the
+   halos with ``annotate_field``, which accepts a field contained in the halo
+   catalog and adds text to the plot near the halo (for example,
+   ``annotate_field='particle_mass'`` will write the halo mass next to each
+   halo, whereas ``'particle_identifier'`` shows the halo number).
+   ``font_kwargs`` contains the arguments controlling the text appearance of
+   the annotated field.  ``factor`` is the number by which the virial radius
+   is multiplied when plotting the circles; e.g. ``factor=2.0`` plots circles
+   with twice the virial radius of each halo.
 
 .. python-script::
 
@@ -177,7 +186,7 @@
    hc.create()
 
    prj = yt.ProjectionPlot(data_ds, 'z', 'density')
-   prj.annotate_halos(hc)
+   prj.annotate_halos(hc, annotate_field='particle_identifier')
    prj.save()
 
 Overplot a Straight Line

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -6,14 +6,20 @@
 There are several colormaps available for yt.  yt includes all of the 
 matplotlib colormaps as well for nearly all functions.  Individual visualization
 functions usually allow you to specify a colormap with the ``cmap`` flag.
-There are a small number of functions (mostly contained in the image_writer 
-module; e.g. write_bitmap, write_image, write_projection, etc.), which do 
-not load the matplotlib infrastructure and can only access the colormaps 
-native to yt.  
 
-Here is a chart of all of the colormaps available.  In addition to each 
-colormap displayed here, you can access its "reverse" by simply appending a 
-``"_r"`` to the end of the colormap name.
+If you have installed brewer2mpl (``pip install brewer2mpl`` or see
+`https://github.com/jiffyclub/brewer2mpl
+<https://github.com/jiffyclub/brewer2mpl>`_), you can also access the discrete
+colormaps available on `http://colorbrewer2.org <http://colorbrewer2.org>`_.
+Instead of supplying the colormap name, specify a tuple of the form (name, type,
+number), for example ``('RdBu', 'Diverging', 9)``.  These discrete colormaps will
+not be interpolated, and can be useful for creating
+colorblind/printer/grayscale-friendly plots. For more information, visit
+`http://colorbrewer2.org <http://colorbrewer2.org>`_.
+
+Here is a chart of all of the yt and matplotlib colormaps available.  In
+addition to each colormap displayed here, you can access its "reverse" by simply
+appending a ``"_r"`` to the end of the colormap name.
 
 All Colormaps (including matplotlib)
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
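
As a sketch of how the tuple form is used in practice (assuming brewer2mpl
is installed; the dataset path is illustrative):

    import yt

    ds = yt.load("Enzo_64/DD0030/data0030")
    p = yt.SlicePlot(ds, "z", "density")
    # (name, type, number) selects a discrete ColorBrewer palette
    p.set_cmap("density", ("RdBu", "Diverging", 9))
    p.save()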

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -90,6 +90,7 @@
     kwargs = dict()
 
 ip.ex("from yt.mods import *")
+ip.ex("import yt")
 
 # Now we add some tab completers, in the vein of:
 # http://pymel.googlecode.com/svn/trunk/tools/ipymel.py

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed setup.py
--- a/setup.py
+++ b/setup.py
@@ -103,7 +103,7 @@
         options = Cython.Compiler.Main.CompilationOptions(
             defaults=Cython.Compiler.Main.default_options,
             include_path=extension.include_dirs,
-            language=extension.language, cplus=cplus,
+            cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
                                                      options=options)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -195,7 +195,6 @@
     ## tau_0
     tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
-    tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -328,7 +328,7 @@
                                                         output["redshift"])
                 proper_box_size = self.simulation.box_size / \
                   (1.0 + output["redshift"])
-                pixel_xarea = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
+                pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
                 factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                 mylog.info("Distance to slice = %s" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer"s image plane.

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/halo_analysis/fields.py
--- a/yt/analysis_modules/halo_analysis/fields.py
+++ b/yt/analysis_modules/halo_analysis/fields.py
@@ -30,7 +30,7 @@
         sl_right = slice(2, None, None)
         div_fac = 2.0
     else:
-        sl_left, sl_right, div_face = slice_info
+        sl_left, sl_right, div_fac = slice_info
 
     def _virial_radius(field, data):
         virial_radius = data.get_field_parameter("virial_radius")

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -80,7 +80,6 @@
     """
 
     dds = halo.halo_catalog.data_ds
-    hds = halo.halo_catalog.halos_ds
     center = dds.arr([halo.quantities["particle_position_%s" % axis] \
                       for axis in "xyz"])
     radius = factor * halo.quantities[radius_field]

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -788,7 +788,7 @@
     
         # Now compute the CDM+HDM+baryon transfer functions
         tf_cb = self.tf_master*self.growth_cb/self.growth_k0;
-        tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0;
+        #tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0;
         return tf_cb
 
 
@@ -832,7 +832,6 @@
     area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
-    area_final = area1
     area_last = area1
     one_pow = 3
     while diff > error:

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -32,7 +32,6 @@
     contours = {}
     node_ids = []
     DLE = data_source.ds.domain_left_edge
-    total_vol = None
     selector = getattr(data_source, "base_object", data_source).selector
     masks = dict((g.id, m) for g, m in data_source.blocks)
     for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -128,7 +128,6 @@
         energy = self.spectral_model.ebins
     
         cell_em = EM[idxs]*vol_scale
-        cell_vol = vol[idxs]*vol_scale
     
         number_of_photons = np.zeros(dshape, dtype='uint64')
         energies = []
@@ -139,7 +138,6 @@
 
         for i, ikT in enumerate(kT_idxs):
 
-            ncells = int(bcounts[i])
             ibegin = bcell[i]
             iend = ecell[i]
             kT = kT_bins[ikT] + 0.5*dkT

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -490,7 +490,6 @@
         z_hat = orient.unit_vectors[2]
 
         n_ph = self.photons["NumberOfPhotons"]
-        num_cells = len(n_ph)
         n_ph_tot = n_ph.sum()
         
         eff_area = None
@@ -667,7 +666,6 @@
         tblhdu = hdulist["MATRIX"]
         n_de = len(tblhdu.data["ENERG_LO"])
         mylog.info("Number of energy bins in RMF: %d" % (n_de))
-        de = tblhdu.data["ENERG_HI"] - tblhdu.data["ENERG_LO"]
         mylog.info("Energy limits: %g %g" % (min(tblhdu.data["ENERG_LO"]),
                                              max(tblhdu.data["ENERG_HI"])))
 
@@ -682,7 +680,6 @@
         phYY = events["ypix"][eidxs]
 
         detectedChannels = []
-        pindex = 0
 
         # run through all photon energies and find which bin they go in
         k = 0

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -128,7 +128,6 @@
     if fni.endswith('.fits'):
         fni = fni.replace('.fits','')
 
-    ndomains_finished = 0
     for (num_halos, domain, halos) in domains_list:
         dle,dre = domain
         print 'exporting: '
@@ -154,7 +153,6 @@
             fh.write("%6.6e \n"%(halo.Rvir*ds['kpc']))
         fh.close()
         export_to_sunrise(ds, fnf, star_particle_type, dle*1.0/dn, dre*1.0/dn)
-        ndomains_finished +=1
 
 def domains_from_halos(ds,halo_list,frvir=0.15):
     domains = {}
@@ -172,8 +170,6 @@
     domains_list = [(len(v),k,v) for k,v in domains.iteritems()]
     domains_list.sort() 
     domains_list.reverse() #we want the most populated domains first
-    domains_limits = [d[1] for d in domains_list]
-    domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
 def prepare_octree(ds,ile,start_level=0,debug=True,dd=None,center=None):
@@ -245,10 +241,6 @@
     hs       = hilbert_state()
     start_time = time.time()
     if debug:
-        if center is not None: 
-            c = center*ds['kpc']
-        else:
-            c = ile*1.0/ds.domain_dimensions*ds['kpc']
         printing = lambda x: print_oct(x)
     else:
         printing = None
@@ -332,7 +324,7 @@
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
         subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
-        for i, (vertex,hilbert_child) in enumerate(hilbert):
+        for (vertex, hilbert_child) in hilbert:
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
             if level < 0:

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -89,8 +89,6 @@
     L = 2 * R * cm_per_kpc
     bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) * L
 
-    dl = L/nz
-
     ds = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
     ds.index
 

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -418,7 +418,6 @@
         otherwise Glue will be started.
         """
         from glue.core import DataCollection, Data
-        from glue.core.coordinates import coordinates_from_header
         from glue.qt.glue_application import GlueApplication
         
         gdata = Data(label=label)
@@ -494,6 +493,18 @@
                     ftype = self._current_fluid_type
                     if (ftype, fname) not in self.ds.field_info:
                         ftype = self.ds._last_freq[0]
+
+                # really ugly check to ensure that this field really does exist somewhere,
+                # in some naming convention, before returning it as a possible field type
+                if (ftype,fname) not in self.ds.field_list and \
+                        fname not in self.ds.field_list and \
+                        (ftype,fname) not in self.ds.derived_field_list and \
+                        fname not in self.ds.derived_field_list and \
+                        (ftype,fname) not in self._container_fields:
+                    raise YTFieldNotFound((ftype,fname),self.ds)
+
+            # these tests are really insufficient as a field type may be valid, and the
+            # field name may be valid, but not the combination (field type, field name)
             if finfo.particle_type and ftype not in self.ds.particle_types:
                 raise YTFieldTypeNotFound(ftype)
             elif not finfo.particle_type and ftype not in self.ds.fluid_types:
@@ -621,7 +632,7 @@
                 fields_to_generate.append(field)
                 continue
             fields_to_get.append(field)
-        if len(fields_to_get) == 0 and fields_to_generate == 0:
+        if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
@@ -793,7 +804,8 @@
         skip += list(set(frb._exclude_fields).difference(set(self._key_fields)))
         self.fields = ensure_list(fields) + \
             [k for k in self.field_data if k not in skip]
-        (bounds, center) = get_window_parameters(axis, center, width, self.ds)
+        (bounds, center, display_center) = \
+            get_window_parameters(axis, center, width, self.ds)
         pw = PWViewerMPL(self, bounds, fields=self.fields, origin=origin,
                          frb_generator=frb, plot_type=plot_type)
         pw._setup_plots()
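
With the existence check added above, asking a data object for a field that
exists under no naming convention now fails fast with YTFieldNotFound rather
than erroring later.  A sketch of the user-visible effect (dataset path is
illustrative):

    import yt
    from yt.utilities.exceptions import YTFieldNotFound

    ds = yt.load("Enzo_64/DD0043/data0043")
    ad = ds.all_data()
    try:
        ad["not_a_real_field"]
    except YTFieldNotFound:
        print "field does not exist under any naming convention"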

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -45,17 +45,12 @@
     YTArray, \
     YTQuantity
 
-from yt.geometry.cartesian_coordinates import \
-    CartesianCoordinateHandler
-from yt.geometry.polar_coordinates import \
-    PolarCoordinateHandler
-from yt.geometry.cylindrical_coordinates import \
-    CylindricalCoordinateHandler
-from yt.geometry.spherical_coordinates import \
-    SphericalCoordinateHandler
-from yt.geometry.geographic_coordinates import \
-    GeographicCoordinateHandler
-from yt.geometry.spec_cube_coordinates import \
+from yt.geometry.coordinates.api import \
+    CartesianCoordinateHandler, \
+    PolarCoordinateHandler, \
+    CylindricalCoordinateHandler, \
+    SphericalCoordinateHandler, \
+    GeographicCoordinateHandler, \
     SpectralCubeCoordinateHandler
 
 # We want to support the movie format in the future.
@@ -460,8 +455,6 @@
             self._last_freq = field
             self._last_finfo = self.field_info[(ftype, fname)]
             return self._last_finfo
-        if fname == self._last_freq[1]:
-            return self._last_finfo
         if fname in self.field_info:
             # Sometimes, if guessing_type == True, this will be switched for
             # the type of field it is.  So we look at the field type and

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/data_objects/tests/test_spheres.py
--- a/yt/data_objects/tests/test_spheres.py
+++ b/yt/data_objects/tests/test_spheres.py
@@ -6,10 +6,11 @@
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+_fields_to_compare = ("spherical_r", "cylindrical_r",
+                      "spherical_theta", "cylindrical_theta",
+                      "spherical_phi", "cylindrical_z")
+
 def test_domain_sphere():
-    ds = fake_random_ds(16, fields = ("density"))
-    sp = ds.sphere(ds.domain_center, ds.domain_width[0])
-
     # Now we test that we can get different radial velocities based on field
     # parameters.
 
@@ -51,3 +52,12 @@
     yield assert_equal, np.any(rp0["radial_velocity"][rp0.used] ==
                                rp1["radial_velocity"][rp1.used]), \
                                False
+
+    ref_sp = ds.sphere("c", 0.25)
+    for f in _fields_to_compare:
+        ref_sp[f].sort()
+    for center in periodicity_cases(ds):
+        sp = ds.sphere(center, 0.25)
+        for f in _fields_to_compare:
+            sp[f].sort()
+            yield assert_equal, sp[f], ref_sp[f]

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -322,10 +322,6 @@
     create_magnitude_field(registry, "particle_specific_angular_momentum",
                            "cm**2/s", ftype=ptype, particle_type=True)
     
-    def _particle_angular_momentum(field, data):
-        return data[ptype, "particle_mass"] \
-             * data[ptype, "particle_specific_angular_momentum"]
-
     def _particle_angular_momentum_x(field, data):
         return data[ptype, "particle_mass"] * \
                data[ptype, "particle_specific_angular_momentum_x"]
@@ -350,6 +346,15 @@
              units="g*cm**2/s", particle_type=True,
              validators=[ValidateParameter('center')])
 
+    def _particle_angular_momentum(field, data):
+        return data[ptype, "particle_mass"] \
+            * data[ptype, "particle_specific_angular_momentum"]
+    registry.add_field((ptype, "particle_angular_momentum"),
+              function=_particle_angular_momentum,
+              particle_type=True,
+              units="g*cm**2/s",
+              validators=[ValidateParameter("center")])
+
     create_magnitude_field(registry, "particle_angular_momentum",
                            "g*cm**2/s", ftype=ptype, particle_type=True)
     

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -123,7 +123,7 @@
     registry.add_field((ftype, "radial_%s" % basename),
                        function = _radial, units = field_units)
     registry.add_field((ftype, "radial_%s_absolute" % basename),
-                       function = _radial, units = field_units)
+                       function = _radial_absolute, units = field_units)
     registry.add_field((ftype, "tangential_%s" % basename),
                        function=_tangential, units = field_units)
 

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -14,11 +14,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import \
+    requires_file, \
+    assert_equal
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
-    small_patch_amr, \
     big_patch_amr, \
+    PixelizedProjectionValuesTest, \
     data_dir_load
 from yt.frontends.art.api import ARTDataset
 
@@ -41,3 +43,8 @@
                     yield PixelizedProjectionValuesTest(
                         d9p, axis, field, weight_field,
                         dobj_name)
+
+
+@requires_file(d9p)
+def test_ARTDataset():
+    assert isinstance(data_dir_load(d9p), ARTDataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-ARTIO frontend tests 
+ARTIO frontend tests
 
 
 
@@ -24,7 +24,7 @@
 from yt.frontends.artio.api import ARTIODataset
 
 _fields = ("temperature", "density", "velocity_magnitude",
-           ("deposit", "all_density"), ("deposit", "all_count")) 
+           ("deposit", "all_density"), ("deposit", "all_count"))
 
 sizmbhloz = "sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art"
 @requires_ds(sizmbhloz)
@@ -45,3 +45,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(sizmbhloz)
+def test_ARTIODataset():
+    assert isinstance(data_dir_load(sizmbhloz), ARTIODataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -57,3 +57,8 @@
     for test in small_patch_amr(stripping, _fields_stripping):
         test_stripping.__name__ = test.description
         yield test
+
+
+@requires_file(cloud)
+def test_AthenaDataset():
+    assert isinstance(data_dir_load(cloud), AthenaDataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1,5 +1,5 @@
 """
-Data structures for Boxlib Codes 
+Data structures for BoxLib Codes
 
 
 
@@ -15,10 +15,8 @@
 
 import os
 import re
-import weakref
 import itertools
 
-from collections import defaultdict
 from stat import ST_CTIME
 
 import numpy as np
@@ -27,53 +25,46 @@
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
-from yt.geometry.selection_routines import \
-    RegionSelector
 from yt.utilities.io_handler import \
     io_registry
-from yt.utilities.physical_constants import \
-    cm_per_mpc
 
 from .fields import \
     BoxlibFieldInfo, \
     MaestroFieldInfo, \
     CastroFieldInfo
 
-from .io import IOHandlerBoxlib
 # This is what we use to find scientific notation that might include d's
 # instead of e's.
 _scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
 # This is the dimensions in the Cell_H file for each level
 # It is different for different dimensionalities, so we make a list
-_dim_finder = [ \
+_dim_finder = [
     re.compile(r"\(\((\d+)\) \((\d+)\) \(\d+\)\)$"),
     re.compile(r"\(\((\d+,\d+)\) \((\d+,\d+)\) \(\d+,\d+\)\)$"),
     re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")]
 # This is the line that prefixes each set of data for a FAB in the FAB file
 # It is different for different dimensionalities, so we make a list
 _endian_regex = r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), \(([0-9 ]+)\)\)\)"
-_header_pattern = [ \
-    re.compile(_endian_regex + 
+_header_pattern = [
+    re.compile(_endian_regex +
                r"\(\((\d+)\) \((\d+)\) \((\d+)\)\) (\d+)\n"),
-    re.compile(_endian_regex + 
+    re.compile(_endian_regex +
                r"\(\((\d+,\d+)\) \((\d+,\d+)\) \((\d+,\d+)\)\) (\d+)\n"),
-    re.compile(_endian_regex + 
+    re.compile(_endian_regex +
                r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")]
 
 
-
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0
     _offset = -1
 
-    def __init__(self, grid_id, offset, filename = None,
-                 index = None):
+    def __init__(self, grid_id, offset, filename=None,
+                 index=None):
         super(BoxlibGrid, self).__init__(grid_id, filename, index)
         self._base_offset = offset
         self._parent_id = []
@@ -126,7 +117,7 @@
         return coords
 
     # Override this as well, since refine_by can vary
-    def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
+    def _fill_child_mask(self, child, mask, tofill, dlevel=1):
         rf = self.ds.ref_factors[self.Level]
         if dlevel != 1:
             raise NotImplementedError
@@ -139,8 +130,10 @@
              startIndex[1]:endIndex[1],
              startIndex[2]:endIndex[2]] = tofill
 
+
 class BoxlibHierarchy(GridIndex):
     grid = BoxlibGrid
+
     def __init__(self, ds, dataset_type='boxlib_native'):
         self.dataset_type = dataset_type
         self.header_filename = os.path.join(ds.output_dir, 'Header')
@@ -149,19 +142,17 @@
         GridIndex.__init__(self, ds, dataset_type)
         self._cache_endianness(self.grids[-1])
 
-        #self._read_particles()
-
     def _parse_index(self):
         """
         read the global header file for an Boxlib plotfile output.
         """
         self.max_level = self.dataset._max_level
-        header_file = open(self.header_filename,'r')
+        header_file = open(self.header_filename, 'r')
 
         self.dimensionality = self.dataset.dimensionality
         _our_dim_finder = _dim_finder[self.dimensionality-1]
-        DRE = self.dataset.domain_right_edge # shortcut
-        DLE = self.dataset.domain_left_edge # shortcut
+        DRE = self.dataset.domain_right_edge  # shortcut
+        DLE = self.dataset.domain_left_edge   # shortcut
 
         # We can now skip to the point in the file we want to start parsing.
         header_file.seek(self.dataset._header_mesh_start)
@@ -190,13 +181,13 @@
         if int(header_file.next()) != 0:
             raise RuntimeError("INTERNAL ERROR! This should be a zero.")
 
-        # each level is one group with ngrids on it. 
-        # each grid has self.dimensionality number of lines of 2 reals 
+        # each level is one group with ngrids on it.
+        # each grid has self.dimensionality number of lines of 2 reals
         self.grids = []
         grid_counter = 0
         for level in range(self.max_level + 1):
             vals = header_file.next().split()
-            lev, ngrids, cur_time = int(vals[0]),int(vals[1]),float(vals[2])
+            lev, ngrids = int(vals[0]), int(vals[1])
             assert(lev == level)
             nsteps = int(header_file.next())
             for gi in range(ngrids):
@@ -232,10 +223,10 @@
             for gi in range(ngrids):
                 # components within it
                 start, stop = _our_dim_finder.match(level_header_file.next()).groups()
-                # fix for non-3d data 
+                # fix for non-3d data
                 # note we append '0' to both ends b/c of the '+1' in dims below
                 start += ',0'*(3-self.dimensionality)
-                stop  += ',0'*(3-self.dimensionality)
+                stop += ',0'*(3-self.dimensionality)
                 start = np.array(start.split(","), dtype="int64")
                 stop = np.array(stop.split(","), dtype="int64")
                 dims = stop - start + 1
@@ -259,7 +250,7 @@
             # already read the filenames above...
         self.float_type = 'float64'
 
-    def _cache_endianness(self,test_grid):
+    def _cache_endianness(self, test_grid):
         """
         Cache the endianness and bytes perreal of the grids by using a
         test grid and assuming that all grids have the same
@@ -270,7 +261,7 @@
         # open the test file & grab the header
         with open(os.path.expanduser(test_grid.filename), 'rb') as f:
             header = f.readline()
-        
+
         bpr, endian, start, stop, centering, nc = \
             _header_pattern[self.dimensionality-1].search(header).groups()
         # Note that previously we were using a different value for BPR than we
@@ -294,7 +285,8 @@
         self.grids = np.array(self.grids, dtype='object')
         self._reconstruct_parent_child()
         for i, grid in enumerate(self.grids):
-            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            if (i % 1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i,
+                                           self.num_grids)
             grid._prepare_grid()
             grid._setup_dx()
         mylog.debug("Done creating grid objects")
@@ -308,10 +300,10 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = np.where(mask.astype("bool")) # where is a tuple
-            grid._children_ids = ids[0] + grid._id_offset 
+            ids = np.where(mask.astype("bool"))  # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset
         mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
+        for i, grid in enumerate(self.grids):  # Second pass
             for child in grid.Children:
                 child._parent_id.append(i + grid._id_offset)
 
@@ -331,10 +323,10 @@
         for line in header_file:
             if len(line.split()) != 3: continue
             self.num_grids += int(line.split()[1])
-        
+
     def _initialize_grid_arrays(self):
         super(BoxlibHierarchy, self)._initialize_grid_arrays()
-        self.grid_start_index = np.zeros((self.num_grids,3), 'int64')
+        self.grid_start_index = np.zeros((self.num_grids, 3), 'int64')
 
     def _initialize_state_variables(self):
         """override to not re-initialize num_grids in AMRHierarchy.__init__
@@ -349,7 +341,7 @@
         self.field_list = [("boxlib", f) for f in
                            self.dataset._field_list]
         self.field_indexes = dict((f[1], i)
-                                for i, f in enumerate(self.field_list))
+                                  for i, f in enumerate(self.field_list))
         # There are times when field_list may change.  We copy it here to
         # avoid that possibility.
         self.field_order = [f for f in self.field_list]
@@ -357,6 +349,7 @@
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.dataset)
 
+
 class BoxlibDataset(Dataset):
     """
     This class is a stripped down class that simply reads and parses
@@ -370,10 +363,10 @@
     periodicity = (True, True, True)
 
     def __init__(self, output_dir,
-                 cparam_filename = "inputs",
-                 fparam_filename = "probin",
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
                  dataset_type='boxlib_native',
-                 storage_filename = None):
+                 storage_filename=None):
         """
         The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -390,14 +383,13 @@
         Dataset.__init__(self, output_dir, dataset_type)
 
         # These are still used in a few places.
-        if not "HydroMethod" in self.parameters.keys():
+        if "HydroMethod" not in self.parameters.keys():
             self.parameters["HydroMethod"] = 'boxlib'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["EOSType"] = -1 # default
+        self.parameters["Time"] = 1.     # default unit is 1...
+        self.parameters["EOSType"] = -1  # default
         self.parameters["gamma"] = self.parameters.get(
             "materials.gamma", 1.6667)
 
-
     def _localize_check(self, fn):
         # If the file exists, use it.  If not, set it to None.
         root_dir = os.path.dirname(self.output_dir)
@@ -410,6 +402,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -418,11 +412,11 @@
         args = inspect.getcallargs(cls.__init__, args, kwargs)
         # This might need to be localized somehow
         inputs_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['cparam_filename'])
+            os.path.dirname(os.path.abspath(output_dir)),
+            args['cparam_filename'])
         if not os.path.exists(inputs_filename) and \
            not os.path.exists(jobinfo_filename):
-            return True # We have no parameters to go off of
+            return True  # We have no parameters to go off of
         # If we do have either inputs or jobinfo, we should be deferring to a
         # different frontend.
         return False
@@ -464,7 +458,7 @@
             self.omega_lambda = self.parameters["comoving_OmL"]
             self.omega_matter = self.parameters["comoving_OmM"]
             self.hubble_constant = self.parameters["comoving_h"]
-            a_file = open(os.path.join(self.output_dir,'comoving_a'))
+            a_file = open(os.path.join(self.output_dir, 'comoving_a'))
             line = a_file.readline().strip()
             a_file.close()
             self.current_redshift = 1/float(line) - 1
@@ -491,7 +485,7 @@
             # So we'll try to determine this.
             vals = vals.split()
             if any(_scinot_finder.match(v) for v in vals):
-                vals = [float(v.replace("D","e").replace("d","e"))
+                vals = [float(v.replace("D", "e").replace("d", "e"))
                         for v in vals]
             if len(vals) == 1:
                 vals = vals[0]
@@ -509,22 +503,22 @@
         # call readline() if we want to end up with an offset at the very end.
         # Fortunately, elsewhere we don't care about the offset, so we're fine
         # everywhere else using iteration exclusively.
-        header_file = open(os.path.join(self.output_dir,'Header'))
+        header_file = open(os.path.join(self.output_dir, 'Header'))
         self.orion_version = header_file.readline().rstrip()
         n_fields = int(header_file.readline())
 
         self._field_list = [header_file.readline().strip()
-                           for i in range(n_fields)]
+                            for i in range(n_fields)]
 
         self.dimensionality = int(header_file.readline())
         self.current_time = float(header_file.readline())
         # This is traditionally a index attribute, so we will set it, but
         # in a slightly hidden variable.
-        self._max_level = int(header_file.readline()) 
+        self._max_level = int(header_file.readline())
         self.domain_left_edge = np.array(header_file.readline().split(),
                                          dtype="float64")
         self.domain_right_edge = np.array(header_file.readline().split(),
-                                         dtype="float64")
+                                          dtype="float64")
         ref_factors = np.array([int(i) for i in
                                 header_file.readline().split()])
         if ref_factors.size == 0:
@@ -540,26 +534,26 @@
             self.refine_by = min(ref_factors)
             # Check that they're all multiples of the minimum.
             if not all(float(rf)/self.refine_by ==
-                   int(float(rf)/self.refine_by) for rf in ref_factors):
+                       int(float(rf)/self.refine_by) for rf in ref_factors):
                 raise RuntimeError
             base_log = np.log2(self.refine_by)
-            self.level_offsets = [0] # level 0 has to have 0 offset
+            self.level_offsets = [0]  # level 0 has to have 0 offset
             lo = 0
             for lm1, rf in enumerate(self.ref_factors):
                 lo += int(np.log2(rf) / base_log) - 1
                 self.level_offsets.append(lo)
-        #assert(np.unique(ref_factors).size == 1)
+        # assert(np.unique(ref_factors).size == 1)
         else:
             self.refine_by = ref_factors[0]
             self.level_offsets = [0 for l in range(self._max_level + 1)]
-        # Now we read the global index space, to get 
+        # Now we read the global index space, to get
         index_space = header_file.readline()
         # This will be of the form:
         #  ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
         # So note that if we split it all up based on spaces, we should be
         # fine, as long as we take the first two entries, which correspond to
         # the root level.  I'm not 100% pleased with this solution.
-        root_space = index_space.replace("(","").replace(")","").split()[:2]
+        root_space = index_space.replace("(", "").replace(")", "").split()[:2]
         start = np.array(root_space[0].split(","), dtype="int64")
         stop = np.array(root_space[1].split(","), dtype="int64")
         self.domain_dimensions = stop - start + 1
@@ -582,9 +576,9 @@
             raise RuntimeError("yt does not yet support spherical geometry")
 
         # overrides for 1/2-dimensional data
-        if self.dimensionality == 1: 
+        if self.dimensionality == 1:
             self._setup1d()
-        elif self.dimensionality == 2: 
+        elif self.dimensionality == 2:
             self._setup2d()
 
     def _set_code_unit_attributes(self):
@@ -594,20 +588,20 @@
         self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _setup1d(self):
-#        self._index_class = BoxlibHierarchy1D
-#        self._fieldinfo_fallback = Orion1DFieldInfo
+        # self._index_class = BoxlibHierarchy1D
+        # self._fieldinfo_fallback = Orion1DFieldInfo
         self.domain_left_edge = \
             np.concatenate([self.domain_left_edge, [0.0, 0.0]])
         self.domain_right_edge = \
             np.concatenate([self.domain_right_edge, [1.0, 1.0]])
         tmp = self.domain_dimensions.tolist()
-        tmp.extend((1,1))
+        tmp.extend((1, 1))
         self.domain_dimensions = np.array(tmp)
         tmp = list(self.periodicity)
         tmp[1] = False
         tmp[2] = False
         self.periodicity = ensure_tuple(tmp)
-        
+
     def _setup2d(self):
         self.domain_left_edge = \
             np.concatenate([self.domain_left_edge, [0.0]])
@@ -636,12 +630,13 @@
         offset = self.level_offsets[l1] - self.level_offsets[l0]
         return self.refine_by**(l1-l0 + offset)
 
+
 class OrionHierarchy(BoxlibHierarchy):
-    
+
     def __init__(self, ds, dataset_type='orion_native'):
         BoxlibHierarchy.__init__(self, ds, dataset_type)
         self._read_particles()
-        #self.io = IOHandlerOrion
+        # self.io = IOHandlerOrion
 
     def _read_particles(self):
         """
@@ -673,7 +668,7 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=np.ones(self.num_grids)
+                mask = np.ones(self.num_grids)
                 for i in xrange(len(coord)):
                     np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
                     np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
@@ -688,39 +683,42 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
-                
+
+
 class OrionDataset(BoxlibDataset):
 
     _index_class = OrionHierarchy
 
     def __init__(self, output_dir,
-                 cparam_filename = "inputs",
-                 fparam_filename = "probin",
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
                  dataset_type='orion_native',
-                 storage_filename = None):
+                 storage_filename=None):
 
         BoxlibDataset.__init__(self, output_dir,
-                 cparam_filename, fparam_filename, dataset_type)
-          
+                               cparam_filename, fparam_filename, dataset_type)
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        # fill our args                                                                               
+        # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
-            # We *know* it's not boxlib if Header doesn't exist.                                      
+            # We *know* it's not boxlib if Header doesn't exist.
             return False
         args = inspect.getcallargs(cls.__init__, args, kwargs)
-        # This might need to be localized somehow                                                     
+        # This might need to be localized somehow
         inputs_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['cparam_filename'])
+            os.path.dirname(os.path.abspath(output_dir)),
+            args['cparam_filename'])
         if not os.path.exists(inputs_filename):
             return False
         if os.path.exists(jobinfo_filename):
             return False
-        # Now we check for all the others                                                             
+        # Now we check for all the others
         lines = open(inputs_filename).readlines()
         if any(("castro." in line for line in lines)): return False
         if any(("nyx." in line for line in lines)): return False
@@ -728,6 +726,7 @@
         if any(("geometry.prob_lo" in line for line in lines)): return True
         return False
 
+
 class CastroDataset(BoxlibDataset):
 
     _field_info_class = CastroFieldInfo
@@ -736,6 +735,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -748,6 +749,7 @@
         if any(line.startswith("Castro   ") for line in lines): return True
         return False
 
+
 class MaestroDataset(BoxlibDataset):
 
     _field_info_class = MaestroFieldInfo
@@ -756,6 +758,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -765,7 +769,7 @@
             return False
         # Now we check the job_info for the mention of maestro
         lines = open(jobinfo_filename).readlines()
-        if any("maestro" in line.lower() for line in lines): return True
+        if any(line.startswith("MAESTRO   ") for line in lines): return True
         return False
 
     def _parse_parameter_file(self):
@@ -782,7 +786,7 @@
                 line = f.next()
             # get the runtime parameters
             for line in f:
-                p, v = (_.strip() for _ in line[4:].split("=",1))
+                p, v = (_.strip() for _ in line[4:].split("=", 1))
                 if len(v) == 0:
                     self.parameters[p] = ""
                 else:
@@ -827,7 +831,7 @@
         maxlevel = int(header.readline()) # max level
 
         # Skip over how many grids on each level; this is degenerate
-        for i in range(maxlevel + 1):dummy = header.readline()
+        for i in range(maxlevel + 1): dummy = header.readline()
 
         grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
@@ -844,6 +848,7 @@
 
         self.grid_particle_count[:, 0] = grid_info[:, 1]
 
+
 class NyxDataset(BoxlibDataset):
 
     _index_class = NyxHierarchy
@@ -852,6 +857,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         pname = args[0].rstrip("/")
+        # boxlib datasets are always directories
+        if not os.path.isdir(pname): return False
         dn = os.path.dirname(pname)
         if len(args) > 1:
             kwargs['paramFilename'] = args[1]
@@ -862,15 +869,13 @@
         # We check for the job_info file's existence because this is currently
         # what distinguishes Nyx data from MAESTRO data.
         pfn = os.path.join(pfname)
-        if not os.path.exists(pfn): return False
+        if not os.path.exists(pfn) or os.path.isdir(pfn): return False
         nyx = any(("nyx." in line for line in open(pfn)))
-        maestro = os.path.exists(os.path.join(pname, "job_info"))
-        orion = (not nyx) and (not maestro)
         return nyx
 
     def _parse_parameter_file(self):
         super(NyxDataset, self)._parse_parameter_file()
-        #return
+        # return
         # Nyx is always cosmological.
         self.cosmological_simulation = 1
         self.omega_lambda = self.parameters["comoving_OmL"]
@@ -904,7 +909,7 @@
     v = vals.split()[0] # Just in case there are multiple; we'll go
                         # back afterward to using vals.
     try:
-        float(v.upper().replace("D","E"))
+        float(v.upper().replace("D", "E"))
     except:
         pcast = str
         if v in ("F", "T"):
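
A note on the _is_valid guards added above: each frontend now bails out with os.path.isdir before touching any files, since Boxlib-format datasets are directories rather than single files. A minimal standalone sketch of the Orion detection cascade, assuming only the file names visible in the diff (the per-line "castro."/"nyx." scans are elided):

import os

def orion_is_valid_sketch(output_dir, cparam_filename="inputs"):
    # Boxlib datasets are always directories; anything else is rejected early.
    if not os.path.isdir(output_dir):
        return False
    # A bare Orion plotfile carries a Header...
    if not os.path.exists(os.path.join(output_dir, "Header")):
        return False
    # ...but no job_info, which would indicate Castro/Maestro/Nyx output.
    if os.path.exists(os.path.join(output_dir, "job_info")):
        return False
    # Finally, an inputs file should sit next to the plotfile directory.
    inputs = os.path.join(os.path.dirname(os.path.abspath(output_dir)),
                          cparam_filename)
    return os.path.exists(inputs)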

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -42,3 +42,8 @@
     for test in small_patch_amr(rt, _fields):
         test_radtube.__name__ = test.description
         yield test
+
+
+@requires_file(rt)

+def test_OrionDataset():
+    assert isinstance(data_dir_load(rt), OrionDataset)
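
The test added here is the first of a family introduced across the frontends in this changeset: load the sample dataset and assert that yt's type inference picked the frontend-specific Dataset subclass. A generic version of the pattern (the dataset path is a stand-in; each suite defines its own near the top of the file):

from yt.testing import requires_file
from yt.utilities.answer_testing.framework import data_dir_load
from yt.frontends.boxlib.api import OrionDataset

rt = "RadTube/plt00500"  # stand-in path; the real value lives in the suite

@requires_file(rt)  # skips cleanly when the sample data is not installed
def test_OrionDataset():
    # data_dir_load should hand back the frontend-specific subclass
    assert isinstance(data_dir_load(rt), OrionDataset)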

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -13,15 +13,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import \
+    requires_file, \
+    assert_equal
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     small_patch_amr, \
-    big_patch_amr, \
     data_dir_load
-from yt.frontends.chombo.api import ChomboDataset
+from yt.frontends.chombo.api import \
+    ChomboDataset, \
+    Orion2Dataset
 
-_fields = ("density", "velocity_magnitude", #"velocity_divergence",
+_fields = ("density", "velocity_magnitude",  # "velocity_divergence",
            "magnetic_field_x")
 
 gc = "GaussianCloud/data.0077.3d.hdf5"
@@ -49,7 +52,8 @@
 def test_zp():
     ds = data_dir_load(zp)
     yield assert_equal, str(ds), "plt32.2d.hdf5"
-    for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
+    for test in small_patch_amr(zp, _zp_fields, input_center="c",
+                                input_weight="rhs"):
         test_tb.__name__ = test.description
         yield test
 
@@ -61,3 +65,17 @@
     for test in small_patch_amr(kho, _fields):
         test_gc.__name__ = test.description
         yield test
+
+@requires_file(zp)
+def test_ChomboDataset():
+    assert isinstance(data_dir_load(zp), ChomboDataset)
+
+
+@requires_file(gc)
+def test_Orion2Dataset():
+    assert isinstance(data_dir_load(gc), Orion2Dataset)
+
+
+@requires_file(kho)
+def test_PlutoDataset():
+    assert isinstance(data_dir_load(kho), PlutoDataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -42,3 +42,8 @@
     for test in small_patch_amr(wt, _fields_2d):
         test_wind_tunnel.__name__ = test.description
         yield test
+
+
+@requires_file(wt)
+def test_FLASHDataset():
+    assert isinstance(data_dir_load(wt), FLASHDataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -56,3 +56,7 @@
         for dobj_name in dso:
             yield FieldValuesTest(c5, field, dobj_name)
 
+
+@requires_file(c5)
+def test_MoabHex8Dataset():
+    assert isinstance(data_dir_load(c5), MoabHex8Dataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -94,8 +94,9 @@
             return rv
         self.add_field(("gas", "temperature"), function=_temperature,
                         units="K")
+        self.create_cooling_fields()
 
-    def create_cooling_fields(self, filename):
+    def create_cooling_fields(self):
         num = os.path.basename(self.ds.parameter_filename).split("."
                 )[0].split("_")[1]
         filename = "%s/cooling_%05i.out" % (
@@ -104,7 +105,7 @@
         if not os.path.exists(filename): return
         def _create_field(name, interp_object):
             def _func(field, data):
-                shape = data["Temperature"].shape
+                shape = data["temperature"].shape
                 d = {'lognH': np.log10(_X*data["density"]/mh).ravel(),
                      'logT' : np.log10(data["temperature"]).ravel()}
                 rv = 10**interp_object(d).reshape(shape)
@@ -131,4 +132,4 @@
             interp = BilinearFieldInterpolator(tvals[n],
                         (avals["lognH"], avals["logT"]),
                         ["lognH", "logT"], truncate = True)
-            _create_field(n, interp)
+            _create_field(("gas", n), interp)
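
For context on the create_cooling_fields change above: the cooling table is interpolated bilinearly in (log nH, log T) and the result exponentiated back to a rate. A self-contained sketch of that lookup, with a made-up table standing in for the cooling_*.out contents:

import numpy as np
from yt.utilities.linear_interpolators import BilinearFieldInterpolator

# Hypothetical table of log10(rate) over 9 log(nH) bins and 13 log(T) bins.
lognH_bins = np.linspace(-2.0, 6.0, 9)
logT_bins = np.linspace(2.0, 8.0, 13)
log_rate = np.zeros((9, 13))  # stand-in values

interp = BilinearFieldInterpolator(log_rate, (lognH_bins, logT_bins),
                                   ["lognH", "logT"], truncate=True)

# The field function builds exactly this kind of dict (from density and
# temperature in the real code) and then undoes the log:
query = {"lognH": np.array([0.5, 3.2]), "logT": np.array([4.1, 6.7])}
rates = 10**interp(query)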

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-RAMSES frontend tests 
+RAMSES frontend tests
 
 
 
@@ -21,10 +21,10 @@
     PixelizedProjectionValuesTest, \
     FieldValuesTest, \
     create_obj
-from yt.frontends.artio.api import ARTIODataset
+from yt.frontends.ramses.api import RAMSESDataset
 
 _fields = ("temperature", "density", "velocity_magnitude",
-           ("deposit", "all_density"), ("deposit", "all_count")) 
+           ("deposit", "all_density"), ("deposit", "all_count"))
 
 output_00080 = "output_00080/info_00080.txt"
 @requires_ds(output_00080)
@@ -44,3 +44,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(output_00080)
+def test_RAMSESDataset():
+    assert isinstance(data_dir_load(output_00080), RAMSESDataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -24,6 +24,7 @@
 import os
 import types
 
+import yt.units
 from yt.utilities.fortran_utils import read_record
 from yt.utilities.logger import ytLogger as mylog
 from yt.geometry.particle_geometry_handler import \
@@ -313,7 +314,8 @@
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
 
         # Set standard values
-        self.current_time = hvals[self._time_readin] * sec_conversion["Gyr"]
+        self.current_time = hvals[self._time_readin] * \
+                            sec_conversion["Gyr"] * yt.units.s
         if self.domain_left_edge is None:
             self.domain_left_edge = np.zeros(3, "float64")
             self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
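
The change above is what turns current_time into a dimensional quantity: multiplying a plain float by yt.units.s yields a YTQuantity carrying seconds. A quick illustration, assuming a header time of 0.5 Gyr:

import yt.units

sec_per_gyr = 3.15576e16  # approximate seconds per Gyr
current_time = 0.5 * sec_per_gyr * yt.units.s
print(current_time)  # a YTQuantity in seconds, not a bare float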

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -53,7 +53,7 @@
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
     _known_ptypes = ghdf5_ptypes
     _var_mass = None
-    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 
+    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
                        'Neon', 'Magnesium', 'Silicon', 'Iron' )
 
 
@@ -81,6 +81,8 @@
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
                 x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
                 y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
                 z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
@@ -96,6 +98,8 @@
         for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
                 g = f["/%s" % ptype]
                 coords = g["Coordinates"][:].astype("float64")
                 mask = selector.select_points(
@@ -103,11 +107,11 @@
                 del coords
                 if mask is None: continue
                 for field in field_list:
-                    
+
                     if field in ("Mass", "Masses") and \
                         ptype not in self.var_mass:
                         data = np.empty(mask.sum(), dtype="float64")
-                        ind = self._known_ptypes.index(ptype) 
+                        ind = self._known_ptypes.index(ptype)
                         data[:] = self.ds["Massarr"][ind]
 
                     elif field in self._element_names:
@@ -152,7 +156,7 @@
         f = _get_h5_handle(data_file.filename)
         pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
         f.close()
-        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) 
+        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
         return npart
 
 
@@ -164,7 +168,7 @@
 
         # loop over all keys in OWLS hdf5 file
         #--------------------------------------------------
-        for key in f.keys():   
+        for key in f.keys():
 
             # only want particle data
             #--------------------------------------
@@ -334,7 +338,7 @@
 
     def _count_particles(self, data_file):
         npart = dict((self._ptypes[i], v)
-            for i, v in enumerate(data_file.header["Npart"])) 
+            for i, v in enumerate(data_file.header["Npart"]))
         return npart
 
     # header is 256, but we have 4 at beginning and end for ints
@@ -443,13 +447,13 @@
         dtype = None
         # We need to do some fairly ugly detection to see what format the auxiliary
         # files are in.  They can be either ascii or binary, and the binary files can be
-        # either floats, ints, or doubles.  We're going to use a try-catch cascade to 
+        # either floats, ints, or doubles.  We're going to use a try-catch cascade to
         # determine the format.
         try:#ASCII
             auxdata = np.genfromtxt(filename, skip_header=1)
             if auxdata.size != np.sum(data_file.total_particles.values()):
                 print "Error reading auxiliary tipsy file"
-                raise RuntimeError 
+                raise RuntimeError
         except ValueError:#binary/xdr
             f = open(filename, 'rb')
             l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
@@ -469,7 +473,7 @@
                 except struct.error: # None of the binary attempts to read succeeded
                     print "Error reading auxiliary tipsy file"
                     raise RuntimeError
-            
+
         # Use the mask to slice out the appropriate particle type data
         if mask.size == data_file.total_particles['Gas']:
             return auxdata[:data_file.total_particles['Gas']]
@@ -556,14 +560,14 @@
 
     def _update_domain(self, data_file):
         '''
-        This method is used to determine the size needed for a box that will 
+        This method is used to determine the size needed for a box that will
         bound the particles.  It simply finds the largest position of the
         whole set of particles, and sets the domain to +/- that value.
         '''
         ds = data_file.ds
         ind = 0
         # Check to make sure that the domain hasn't already been set
-        # by the parameter file 
+        # by the parameter file
         if np.all(np.isfinite(ds.domain_left_edge)) and np.all(np.isfinite(ds.domain_right_edge)):
             return
         with open(data_file.filename, "rb") as f:
@@ -682,11 +686,11 @@
                 continue
             field_list.append((ptype, field))
         if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
-            field_list += [("Gas",a) for a in self._aux_fields] 
+            field_list += [("Gas",a) for a in self._aux_fields]
         if any(["DarkMatter"==f[0] for f in field_list]):
-            field_list += [("DarkMatter",a) for a in self._aux_fields] 
+            field_list += [("DarkMatter",a) for a in self._aux_fields]
         if any(["Stars"==f[0] for f in field_list]):
-            field_list += [("Stars",a) for a in self._aux_fields] 
+            field_list += [("Stars",a) for a in self._aux_fields]
         self._field_list = field_list
         return self._field_list
 
@@ -706,11 +710,11 @@
 class IOHandlerHTTPStream(BaseIOHandler):
     _dataset_type = "http_particle_stream"
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
-    
+
     def __init__(self, ds):
         if requests is None:
             raise RuntimeError
-        self._url = ds.base_url 
+        self._url = ds.base_url
         # This should eventually manage the IO and cache it
         self.total_bytes = 0
         super(IOHandlerHTTPStream, self).__init__(ds)
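
On the try-catch cascade documented in the comment above: the auxiliary tipsy files carry no format marker, so the reader attempts ASCII first and then guesses the binary element width. A condensed sketch of that strategy (the real code also validates the decoded values, which is elided here):

import struct
import numpy as np

def read_aux_sketch(filename, n_expected, endian="<"):
    # ASCII first: genfromtxt raises ValueError on binary input.
    try:
        data = np.genfromtxt(filename, skip_header=1)
        if data.size != n_expected:
            raise RuntimeError("Error reading auxiliary tipsy file")
        return data
    except ValueError:
        pass
    # Binary fallback: a 4-byte count header, then floats, ints, or doubles.
    with open(filename, "rb") as f:
        count = struct.unpack(endian + "i", f.read(4))[0]
        payload = f.read()
    for fmt in ("f", "i", "d"):  # try each element width until sizes agree
        if count * struct.calcsize(fmt) == len(payload):
            return np.frombuffer(payload, dtype=endian + fmt)
    raise RuntimeError("Error reading auxiliary tipsy file")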

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/sph/tests/test_owls.py
--- a/yt/frontends/sph/tests/test_owls.py
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -53,3 +53,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(os33)
+def test_OWLSDataset():
+    assert isinstance(data_dir_load(os33), OWLSDataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/frontends/sph/tests/test_tipsy.py
--- a/yt/frontends/sph/tests/test_tipsy.py
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -92,3 +92,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(pkdgrav)
+def test_TipsyDataset():
+    assert isinstance(data_dir_load(pkdgrav), TipsyDataset)

diff -r d47f280595fbd2d574116dcd57ce139373eb2895 -r 858828e7276bb03cff979677dd46339fee03fbed yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -270,7 +270,6 @@
 
     api_version = get_ipython_api_version()
 
-    stack = inspect.stack()
     frame = inspect.stack()[num_up]
     loc = frame[0].f_locals.copy()
     glo = frame[0].f_globals
@@ -537,7 +536,6 @@
     return version_info
 
 def get_script_contents():
-    stack = inspect.stack()
     top_frame = inspect.stack()[-1]
     finfo = inspect.getframeinfo(top_frame[0])
     if finfo[2] != "<module>": return None
@@ -749,6 +747,7 @@
     return _func
     
 def enable_plugins():
+    import yt
     from yt.config import ytcfg
     my_plugin_name = ytcfg.get("yt","pluginfilename")
     # We assume that it is with respect to the $HOME/.yt directory
@@ -758,4 +757,10 @@
         _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
     if os.path.isfile(_fn):
         mylog.info("Loading plugins from %s", _fn)
-        execfile(_fn)
+        execfile(_fn, yt.__dict__)
+
+def fix_unitary(u):
+    if u == '1':
+        return 'unitary'
+    else:
+        return u
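
Two small quality-of-life changes land here: plugins are now exec'd into the yt namespace rather than the local scope of enable_plugins, and the new fix_unitary normalizes the shorthand unit name '1'. The latter in hypothetical usage:

from yt.funcs import fix_unitary

for u in ("1", "cm", "unitary"):
    # '1' maps to the whole-domain 'unitary' unit; others pass through
    print("%s -> %s" % (u, fix_unitary(u)))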

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/5a1377112b66/
Changeset:   5a1377112b66
Branch:      yt
User:        atmyers
Date:        2014-09-17 07:16:50+00:00
Summary:     missing an import
Affected #:  1 file

diff -r 858828e7276bb03cff979677dd46339fee03fbed -r 5a1377112b6675b21a9d4c54a1e5bdd1f5428319 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -22,7 +22,8 @@
     data_dir_load
 from yt.frontends.chombo.api import \
     ChomboDataset, \
-    Orion2Dataset
+    Orion2Dataset, \
+    PlutoDataset
 
 _fields = ("density", "velocity_magnitude",  # "velocity_divergence",
            "magnetic_field_x")


https://bitbucket.org/yt_analysis/yt/commits/1d2404dda69a/
Changeset:   1d2404dda69a
Branch:      yt
User:        atmyers
Date:        2014-09-20 23:31:46+00:00
Summary:     np.arange --> range
Affected #:  1 file

diff -r 5a1377112b6675b21a9d4c54a1e5bdd1f5428319 -r 1d2404dda69a376663fe49be33260151ce755d1b yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -48,7 +48,7 @@
     def _magnitude(field, data):
         fn = field_components[0]
         mag = data[fn] * data[fn]
-        for idim in np.arange(1, registry.ds.dimensionality):
+        for idim in range(1, registry.ds.dimensionality):
             fn = field_components[idim]
             mag += data[fn] * data[fn]
         return np.sqrt(mag)
@@ -66,7 +66,7 @@
     def _squared(field, data):
         fn = field_components[0]
         squared  = data[fn] * data[fn]
-        for idim in np.arange(1, registry.ds.dimensionality):
+        for idim in range(1, registry.ds.dimensionality):
             fn = field_components[idim]
             squared += data[fn] * data[fn]
         return squared
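
Beyond avoiding a needless array allocation, looping with range(1, dimensionality) also means only the components a low-dimensional dataset actually has are ever indexed. A standalone sketch of the accumulation, assuming a 2D dataset with hypothetical field names:

import numpy as np

dimensionality = 2
components = ["velocity_x", "velocity_y", "velocity_z"]
data = {c: np.ones(4) for c in components[:dimensionality]}  # no z in 2D

# Start from the first component, then fold in the rest one dimension at
# a time; a plain range() is all the index loop needs.
mag2 = data[components[0]] * data[components[0]]
for idim in range(1, dimensionality):
    mag2 += data[components[idim]] * data[components[idim]]
magnitude = np.sqrt(mag2)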


https://bitbucket.org/yt_analysis/yt/commits/fcb604295fa9/
Changeset:   fcb604295fa9
Branch:      yt
User:        atmyers
Date:        2014-09-21 01:18:20+00:00
Summary:     making some changes suggested by Chris Malone in code review
Affected #:  1 file

diff -r 1d2404dda69a376663fe49be33260151ce755d1b -r fcb604295fa9aff1c705c62a1d3744ee65cc29c4 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -18,6 +18,7 @@
 import os
 import weakref
 import numpy as np
+import six
 
 from collections import \
      defaultdict
@@ -46,6 +47,7 @@
     ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D, \
     PlutoFieldInfo
 
+
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
     __slots__ = ["_level_id", "stop_index"]
@@ -91,6 +93,7 @@
         return [self.index.grids[cid - self._id_offset]
                 for cid in self._children_ids]
 
+
 class ChomboHierarchy(GridIndex):
 
     grid = ChomboGrid
@@ -238,6 +241,7 @@
             for child in grid.Children:
                 child._parent_id.append(i + grid._id_offset)
 
+
 class ChomboDataset(Dataset):
     _index_class = ChomboHierarchy
     _field_info_class = ChomboFieldInfo
@@ -263,8 +267,8 @@
         try:
             self.current_time = self._handle.attrs['time']
         except KeyError:
-            try: 
-                self.current_time = self._handle['level_0'].attrs['time']
+            try:
+                self.current_time = self._handle['/level_0'].attrs['time']
             except KeyError:
                 self.current_time = 0.0
 
@@ -354,7 +358,7 @@
         pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
-        if isinstance(args[0], str):
+        if isinstance(args[0], six.string_types): 
             dir_name = os.path.dirname(os.path.abspath(args[0]))
             pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
@@ -384,6 +388,7 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
+
 class PlutoHierarchy(ChomboHierarchy):
 
     def __init__(self, ds, dataset_type="pluto_chombo_native"):
@@ -504,7 +509,7 @@
 
         pluto_ini_file_exists = False
 
-        if type(args[0]) == type(""):
+        if isinstance(args[0], six.string_types):
             dir_name = os.path.dirname(os.path.abspath(args[0]))
             pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
@@ -514,6 +519,7 @@
 
         return False
 
+
 class Orion2Hierarchy(ChomboHierarchy):
 
     def __init__(self, ds, dataset_type="orion_chombo_native"):
@@ -547,6 +553,7 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
+
 class Orion2Dataset(ChomboDataset):
 
     _index_class = Orion2Hierarchy
@@ -625,11 +632,13 @@
                 pass
         return False
 
+
 class ChomboPICHierarchy(ChomboHierarchy):
 
     def __init__(self, ds, dataset_type="chombo_hdf5"):
         ChomboHierarchy.__init__(self, ds, dataset_type)
 
+
 class ChomboPICDataset(ChomboDataset):
 
     _index_class = ChomboPICHierarchy
@@ -653,7 +662,7 @@
         pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
-        if isinstance(args[0], str):
+        if isinstance(args[0], six.string_types):
             dir_name = os.path.dirname(os.path.abspath(args[0]))
             pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")


https://bitbucket.org/yt_analysis/yt/commits/d596ad6eafb7/
Changeset:   d596ad6eafb7
Branch:      yt
User:        ngoldbaum
Date:        2014-09-21 03:24:32+00:00
Summary:     Merged in atmyers/yt (pull request #1186)

Chombo frontend improvements
Affected #:  15 files

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -24,7 +24,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Chombo                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
+| Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Enzo                  |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -955,7 +955,7 @@
     x_min : float
         The minimum value of the x profile field.
     x_max : float
-        The maximum value of hte x profile field.
+        The maximum value of the x profile field.
     x_log : boolean
         Controls whether or not the bins for the x field are evenly
         spaced in linear (False) or log (True) space.

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -195,7 +195,7 @@
 
     def get_label(self, projected=False):
         """
-        Return a data label for the given field, inluding units.
+        Return a data label for the given field, including units.
         """
         name = self.name[1]
         if self.display_name is not None:

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -43,18 +43,14 @@
                            ftype = "gas", slice_info = None,
                            validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _magnitude(field, data):
-        mag  = data[xn] * data[xn]
-        mag += data[yn] * data[yn]
-        mag += data[zn] * data[zn]
+        fn = field_components[0]
+        mag = data[fn] * data[fn]
+        for idim in range(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            mag += data[fn] * data[fn]
         return np.sqrt(mag)
 
     registry.add_field((ftype, "%s_magnitude" % basename),
@@ -65,18 +61,14 @@
                          ftype = "gas", slice_info = None,
                          validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _squared(field, data):
-        squared  = data[xn] * data[xn]
-        squared += data[yn] * data[yn]
-        squared += data[zn] * data[zn]
+        fn = field_components[0]
+        squared  = data[fn] * data[fn]
+        for idim in range(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            squared += data[fn] * data[fn]
         return squared
 
     registry.add_field((ftype, "%s_squared" % basename),

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -14,14 +14,24 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-      ChomboGrid, \
-      ChomboHierarchy, \
-      ChomboDataset, \
-      Orion2Hierarchy, \
-      Orion2Dataset
+    ChomboGrid, \
+    ChomboHierarchy, \
+    ChomboDataset, \
+    Orion2Hierarchy, \
+    Orion2Dataset, \
+    ChomboPICHierarchy, \
+    ChomboPICDataset, \
+    PlutoHierarchy, \
+    PlutoDataset
 
 from .fields import \
-      ChomboFieldInfo
+    ChomboFieldInfo, \
+    Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, \
+    ChomboPICFieldInfo2D, \
+    ChomboPICFieldInfo3D, \
+    PlutoFieldInfo
 
 from .io import \
-      IOHandlerChomboHDF5
+    IOHandlerChomboHDF5,\
+    IOHandlerPlutoHDF5

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -18,6 +18,7 @@
 import os
 import weakref
 import numpy as np
+import six
 
 from collections import \
      defaultdict
@@ -42,7 +43,10 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo, Orion2FieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D, \
+    PlutoFieldInfo
+
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -89,6 +93,7 @@
         return [self.index.grids[cid - self._id_offset]
                 for cid in self._children_ids]
 
+
 class ChomboHierarchy(GridIndex):
 
     grid = ChomboGrid
@@ -123,7 +128,7 @@
         # only do anything if the dataset contains particles
         if not any([f[1].startswith('particle_') for f in self.field_list]):
             return
-        
+
         self.num_particles = 0
         particles_per_grid = []
         for key, val in self._handle.items():
@@ -189,7 +194,7 @@
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
-                
+
                 if D == 1:
                     si = np.concatenate((si, [0.0, 0.0]))
                     ei = np.concatenate((ei, [0.0, 0.0]))
@@ -236,6 +241,7 @@
             for child in grid.Children:
                 child._parent_id.append(i + grid._id_offset)
 
+
 class ChomboDataset(Dataset):
     _index_class = ChomboHierarchy
     _field_info_class = ChomboFieldInfo
@@ -254,12 +260,17 @@
         if D == 3:
             self.dataset_type = 'chombo_hdf5'
 
-        # some datasets will not be time-dependent, make
+        # some datasets will not be time-dependent, and to make
+        # matters worse, the simulation time is not always
+        # stored in the same place in the hdf file! Make
         # sure we handle that here.
         try:
             self.current_time = self._handle.attrs['time']
         except KeyError:
-            self.current_time = 0.0
+            try:
+                self.current_time = self._handle['/level_0'].attrs['time']
+            except KeyError:
+                self.current_time = 0.0
 
         self.geometry = "cartesian"
         self.ini_filename = ini_filename
@@ -274,10 +285,13 @@
         self.parameters["EOSType"] = -1 # default
 
     def _set_code_unit_attributes(self):
-        self.length_unit = YTQuantity(1.0, "cm")
-        self.mass_unit = YTQuantity(1.0, "g")
-        self.time_unit = YTQuantity(1.0, "s")
-        self.velocity_unit = YTQuantity(1.0, "cm/s")
+        mylog.warning("Setting code length to be 1.0 cm")
+        mylog.warning("Setting code mass to be 1.0 g")
+        mylog.warning("Setting code time to be 1.0 s")
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.time_unit = self.quan(1.0, "s")
+        self.velocity_unit = self.length_unit / self.time_unit
 
     def _localize(self, f, default):
         if f is None:
@@ -285,7 +299,7 @@
         return f
 
     def _parse_parameter_file(self):
-        
+
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
         self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
@@ -303,9 +317,19 @@
             self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
-        
+
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True, True, True)
+        self._determine_periodic()
+
+    def _determine_periodic(self):
+        # we default to true unless the HDF5 file says otherwise
+        is_periodic = np.array([True, True, True])
+        for dir in range(self.dimensionality):
+            try:
+                is_periodic[dir] = self._handle['/level_0'].attrs['is_periodic_%d' % dir]
+            except KeyError:
+                is_periodic[dir] = True
+        self.periodicity = tuple(is_periodic)
 
     def _calc_left_edge(self):
         fileh = self._handle
@@ -331,22 +355,23 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
-        pluto_ini_file_exists  = False
+        pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
-        if type(args[0]) == type(""):
+        if isinstance(args[0], six.string_types): 
             dir_name = os.path.dirname(os.path.abspath(args[0]))
             pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
             orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
 
-        if not (pluto_ini_file_exists and orion2_ini_file_exists):
+        if not (pluto_ini_file_exists or orion2_ini_file_exists):
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
                 # ORION2 simulations should always have this:
                 valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
+                valid = valid and not ('Charm_global' in fileh.keys())
                 fileh.close()
                 return valid
             except:
@@ -363,6 +388,138 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
+
+class PlutoHierarchy(ChomboHierarchy):
+
+    def __init__(self, ds, dataset_type="pluto_chombo_native"):
+        ChomboHierarchy.__init__(self, ds, dataset_type)
+
+    def _parse_index(self):
+        f = self._handle # shortcut
+        self.max_level = f.attrs['num_levels'] - 1
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.dataset.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_levels[i] = level_number
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)+self.domain_left_edge.value
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)+self.domain_left_edge.value
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+
+class PlutoDataset(ChomboDataset):
+
+    _index_class = PlutoHierarchy
+    _field_info_class = PlutoFieldInfo
+
+    def __init__(self, filename, dataset_type='pluto_chombo_native',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
+
+    def _parse_parameter_file(self):
+        """
+        Check to see whether a 'pluto.ini' file
+        exists in the plot file directory. If one does, attempt to parse it.
+        Otherwise grab the dimensions from the hdf5 file.
+        """
+
+        pluto_ini_file_exists = False
+        dir_name = os.path.dirname(os.path.abspath(self.fullplotdir))
+        pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+        pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_dimensions = self._calc_domain_dimensions()
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+
+        if pluto_ini_file_exists:
+            lines=[line.strip() for line in open(pluto_ini_filename)]
+            self.domain_left_edge = np.zeros(self.dimensionality)
+            self.domain_right_edge = np.zeros(self.dimensionality)
+            for il,ll in enumerate(lines[lines.index('[Grid]')+2:lines.index('[Grid]')+2+self.dimensionality]):
+                self.domain_left_edge[il] = float(ll.split()[2])
+                self.domain_right_edge[il] = float(ll.split()[-1])
+            self.periodicity = [0]*3
+            for il,ll in enumerate(lines[lines.index('[Boundary]')+2:lines.index('[Boundary]')+2+6:2]):
+                self.periodicity[il] = (ll.split()[1] == 'periodic')
+            self.periodicity=tuple(self.periodicity)
+            for il,ll in enumerate(lines[lines.index('[Parameters]')+2:]):
+                if (ll.split()[0] == 'GAMMA'):
+                    self.gamma = float(ll.split()[1])
+        else:
+            self.domain_left_edge = self._calc_left_edge()
+            self.domain_right_edge = self._calc_right_edge()
+            self.periodicity = (True, True, True)
+
+        # if a lower-dimensional dataset, set up pseudo-3D stuff here.
+        if self.dimensionality == 1:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists = False
+
+        if isinstance(args[0], six.string_types):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+
+        if pluto_ini_file_exists:
+            return True
+
+        return False
+
+
 class Orion2Hierarchy(ChomboHierarchy):
 
     def __init__(self, ds, dataset_type="orion_chombo_native"):
@@ -396,6 +553,7 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
+
 class Orion2Dataset(ChomboDataset):
 
     _index_class = Orion2Hierarchy
@@ -427,7 +585,7 @@
         self.domain_right_edge = self._calc_right_edge()
         self.domain_dimensions = self._calc_domain_dimensions()
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True, True, True)
+        self._determine_periodic()
 
     def _parse_inputs_file(self, ini_filename):
         self.fullplotdir = os.path.abspath(self.parameter_filename)
@@ -449,7 +607,7 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
-        pluto_ini_file_exists  = False
+        pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
         if type(args[0]) == type(""):
@@ -458,18 +616,70 @@
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
             orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
-        
+
         if orion2_ini_file_exists:
             return True
 
         if not pluto_ini_file_exists:
             try:
                 fileh = h5py.File(args[0],'r')
-                valid = "Chombo_global" in fileh["/"]
                 valid = 'CeilVA_mass' in fileh.attrs.keys()
+                valid = "Chombo_global" in fileh["/"] and "Charm_global" not in fileh["/"]
+                valid = valid and 'CeilVA_mass' in fileh.attrs.keys()
                 fileh.close()
                 return valid
             except:
                 pass
         return False
 
+
+class ChomboPICHierarchy(ChomboHierarchy):
+
+    def __init__(self, ds, dataset_type="chombo_hdf5"):
+        ChomboHierarchy.__init__(self, ds, dataset_type)
+
+
+class ChomboPICDataset(ChomboDataset):
+
+    _index_class = ChomboPICHierarchy
+    _field_info_class = ChomboPICFieldInfo3D
+
+    def __init__(self, filename, dataset_type='chombo_hdf5',
+                 storage_filename=None, ini_filename=None):
+
+        ChomboDataset.__init__(self, filename, dataset_type,
+                               storage_filename, ini_filename)
+
+        if self.dimensionality == 1:
+            self._field_info_class = ChomboPICFieldInfo1D
+
+        if self.dimensionality == 2:
+            self._field_info_class = ChomboPICFieldInfo2D
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists = False
+        orion2_ini_file_exists = False
+
+        if isinstance(args[0], six.string_types):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+
+        if orion2_ini_file_exists:
+            return False
+
+        if pluto_ini_file_exists:
+            return False
+
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = "Charm_global" in fileh["/"]
+            fileh.close()
+            return valid
+        except:
+            pass
+        return False
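
The new _determine_periodic above replaces the hard-coded (True, True, True): per-dimension flags are read from the level_0 attributes when present, with a periodic default otherwise. The same logic as a standalone helper (file name in the usage comment is hypothetical):

import numpy as np
import h5py

def determine_periodicity(handle, dimensionality):
    # Default to periodic; older files lack the is_periodic_* attributes.
    is_periodic = np.array([True, True, True])
    for d in range(dimensionality):
        try:
            is_periodic[d] = handle['/level_0'].attrs['is_periodic_%d' % d]
        except KeyError:
            is_periodic[d] = True
    return tuple(is_periodic)

# with h5py.File("data.0077.3d.hdf5", "r") as f:
#     print(determine_periodicity(f, 3))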

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -14,8 +14,13 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+from yt.units.unit_object import Unit
 from yt.fields.field_info_container import \
-    FieldInfoContainer
+    FieldInfoContainer, \
+    particle_deposition_functions, \
+    particle_vector_functions, \
+    standard_particle_fields
+
 from yt.frontends.boxlib.fields import \
     rho_units, \
     mom_units, \
@@ -27,6 +32,8 @@
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
+vel_units = "code_length / code_time"
+b_units = "code_magnetic"
 
 # Chombo does not have any known fields by itself.
 class ChomboFieldInfo(FieldInfoContainer):
@@ -88,3 +95,174 @@
                        units = "erg/cm**3")
         self.add_field("temperature", function=_temperature,
                        units="K")
+
+
+class ChomboPICFieldInfo3D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_z", ("code_length / code_time**2", [], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+        ("particle_velocity_z", ("code_length / code_time", [], None)),
+    )
+
+    # I am re-implementing this here to override a few default behaviors:
+    # I don't want to skip output units for code_length and I do want
+    # particle_fields to default to take_log = False.
+    def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
+        skip_output_units = ()
+        for f, (units, aliases, dn) in sorted(self.known_particle_fields):
+            units = self.ds.field_units.get((ptype, f), units)
+            if (f in aliases or ptype not in self.ds.particle_types_raw) and \
+                units not in skip_output_units:
+                u = Unit(units, registry = self.ds.unit_registry)
+                output_units = str(u.get_cgs_equivalent())
+            else:
+                output_units = units
+            if (ptype, f) not in self.field_list:
+                continue
+            self.add_output_field((ptype, f),
+                units = units, particle_type = True,
+                display_name = dn, output_units = output_units, take_log=False)
+            for alias in aliases:
+                self.alias((ptype, alias), (ptype, f), units = output_units)
+
+        # We'll either have particle_position or particle_position_[xyz]
+        if (ptype, "particle_position") in self.field_list or \
+           (ptype, "particle_position") in self.field_aliases:
+            particle_scalar_functions(ptype,
+                   "particle_position", "particle_velocity",
+                   self)
+        else:
+            # We need to check to make sure that there's a "known field" that
+            # overlaps with one of the vector fields.  For instance, if we are
+            # in the Stream frontend, and we have a set of scalar position
+            # fields, they will overlap with -- and be overridden by -- the
+            # "known" vector field that the frontend creates.  So the easiest
+            # thing to do is to simply remove the on-disk field (which doesn't
+            # exist) and replace it with a derived field.
+            if (ptype, "particle_position") in self and \
+                 self[ptype, "particle_position"]._function == NullFunc:
+                self.pop((ptype, "particle_position"))
+            particle_vector_functions(ptype,
+                    ["particle_position_%s" % ax for ax in 'xyz'],
+                    ["particle_velocity_%s" % ax for ax in 'xyz'],
+                    self)
+        particle_deposition_functions(ptype, "particle_position",
+            "particle_mass", self)
+        standard_particle_fields(self, ptype)
+        # Now we check for any leftover particle fields
+        for field in sorted(self.field_list):
+            if field in self: continue
+            if not isinstance(field, tuple):
+                raise RuntimeError
+            if field[0] not in self.ds.particle_types:
+                continue
+            self.add_output_field(field, 
+                                  units = self.ds.field_units.get(field, ""),
+                                  particle_type = True)
+        self.setup_smoothed_fields(ptype, 
+                                   num_neighbors=num_neighbors,
+                                   ftype=ftype)
+
+def _dummy_position(field, data):
+    return 0.5*np.ones_like(data['particle_position_x'])
+
+def _dummy_velocity(field, data):
+    return np.zeros_like(data['particle_velocity_x'])
+
+def _dummy_field(field, data):
+    return 0.0 * data['gravitational_field_x']
+
+fluid_field_types = ['chombo', 'gas']
+particle_field_types = ['io', 'all']
+
+class ChomboPICFieldInfo2D(ChomboPICFieldInfo3D):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", [], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, ds, field_list):
+        super(ChomboPICFieldInfo2D, self).__init__(ds, field_list)
+
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+
+        for ptype in particle_field_types:                
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+
+class ChomboPICFieldInfo1D(ChomboPICFieldInfo3D):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, ds, field_list):
+        super(ChomboPICFieldInfo1D, self).__init__(ds, field_list)
+
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_y'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                    units = "code_length / code_time**2")
+
+        for ptype in particle_field_types:
+            self.add_field((ptype, "particle_position_y"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_velocity_y"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+
+class PlutoFieldInfo(ChomboFieldInfo):
+    known_other_fields = (
+        ("rho", (rho_units, ["density"], None)),
+        ("prs", ("code_mass / (code_length * code_time**2)", ["pressure"], None)),
+        ("vx1", (vel_units, ["velocity_x"], None)),
+        ("vx2", (vel_units, ["velocity_y"], None)),
+        ("vx3", (vel_units, ["velocity_z"], None)),
+        ("bx1", (b_units, ["magnetic_field_x"], None)),
+        ("bx2", (b_units, ["magnetic_field_y"], None)),
+        ("bx3", (b_units, ["magnetic_field_z"], None)),
+    )
+
+    known_particle_fields = ()
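
To summarize the padding scheme used by ChomboPICFieldInfo1D/2D above: components missing from a low-dimensional run are synthesized as derived fields, so code written for 3D data never sees a KeyError. The two dummy generators, exercised against stand-in 2D particle data:

import numpy as np

def _dummy_position(field, data):
    # absent axes sit at the domain midpoint, 0.5 in code units
    return 0.5 * np.ones_like(data['particle_position_x'])

def _dummy_velocity(field, data):
    # ...and carry zero velocity
    return np.zeros_like(data['particle_velocity_x'])

data = {'particle_position_x': np.array([0.1, 0.9]),
        'particle_velocity_x': np.array([1.0, -1.0])}
print(_dummy_position(None, data))  # [ 0.5  0.5]
print(_dummy_velocity(None, data))  # [ 0.  0.]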

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -19,7 +19,7 @@
 from yt.utilities.logger import ytLogger as mylog
 
 from yt.utilities.io_handler import \
-           BaseIOHandler
+    BaseIOHandler
 
 class IOHandlerChomboHDF5(BaseIOHandler):
     _dataset_type = "chombo_hdf5"
@@ -30,6 +30,18 @@
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
         self._handle = ds._handle
+        self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self._read_ghost_info()
+
+    def _read_ghost_info(self):
+        try:
+            self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
+            # pad with zeros if the dataset is low-dimensional
+            self.ghost += (3 - self.dim)*(0,)
+            self.ghost = np.array(self.ghost)
+        except KeyError:
+            # assume zero ghosts if outputGhosts not present
+            self.ghost = np.zeros(self.dim)
 
     _field_dict = None
     @property
@@ -62,18 +74,20 @@
         fns = [c[1] for c in f.attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
-
         lstring = 'level_%i' % grid.Level
         lev = self._handle[lstring]
         dims = grid.ActiveDimensions
-        boxsize = dims.prod()
+        shape = grid.ActiveDimensions + 2*self.ghost
+        boxsize = shape.prod()
         
         grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
+        data_no_ghost = data.reshape(shape, order='F')
+        ghost_slice = [slice(g, g + d, None) for g, d in zip(self.ghost, dims)]
+        ghost_slice = tuple(ghost_slice[0:self.dim])
+        return data_no_ghost[ghost_slice]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -168,6 +182,8 @@
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
         self._handle = ds._handle
+        self.dim = 2
+        self._read_ghost_info()
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
@@ -177,7 +193,21 @@
     def __init__(self, ds, *args, **kwargs):
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
-        self._handle = ds._handle   
+        self.dim = 1
+        self._handle = ds._handle
+        self._read_ghost_info()
+
+class IOHandlerPlutoHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "pluto_chombo_native"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, ds, *args, **kwargs):
+        BaseIOHandler.__init__(self, ds, *args, **kwargs)
+        self.ds = ds
+        self._handle = ds._handle
+        self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self._read_ghost_info()
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"
@@ -185,7 +213,7 @@
     def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files
-             
+
         """
 
         fn = grid.ds.fullplotdir[:-4] + "sink"

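The ghost-zone handling added to _read_data above is easier to see in
isolation: each box is stored on disk with ghost extra cells on every
side, so the reader reshapes the flat buffer to the padded shape and
then slices the interior back out. A self-contained sketch with
made-up sizes (NumPy only; the values stand in for a real field):

    import numpy as np

    dims = np.array([8, 8, 8])      # grid.ActiveDimensions
    ghost = np.array([2, 2, 2])     # outputGhost, zero-padded to 3 entries
    shape = dims + 2 * ghost        # on-disk box shape, ghosts included

    data = np.arange(shape.prod(), dtype='float64')
    box = data.reshape(shape, order='F')

    # the interior runs from g to g + d along each axis
    interior = box[tuple(slice(g, g + d) for g, d in zip(ghost, dims))]
    assert interior.shape == tuple(dims)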
diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -22,7 +22,8 @@
     data_dir_load
 from yt.frontends.chombo.api import \
     ChomboDataset, \
-    Orion2Dataset
+    Orion2Dataset, \
+    PlutoDataset
 
 _fields = ("density", "velocity_magnitude",  # "velocity_divergence",
            "magnetic_field_x")
@@ -57,6 +58,14 @@
         test_tb.__name__ = test.description
         yield test
 
+kho = "KelvinHelmholtz/data.0004.hdf5"
+ at requires_ds(kho)
+def test_kho():
+    ds = data_dir_load(kho)
+    yield assert_equal, str(ds), "data.0004.hdf5"
+    for test in small_patch_amr(kho, _fields):
+        test_gc.__name__ = test.description
+        yield test
 
 @requires_file(zp)
 def test_ChomboDataset():
@@ -68,6 +77,6 @@
     assert isinstance(data_dir_load(gc), Orion2Dataset)
 
 
-#@requires_file(kho)
-#def test_PlutoDataset():
-#    assert isinstance(data_dir_load(kho), PlutoDataset)
+@requires_file(kho)
+def test_PlutoDataset():
+    assert isinstance(data_dir_load(kho), PlutoDataset)

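With this branch installed, the re-enabled Pluto test can also be run
by hand; a quick sanity check along the lines of test_PlutoDataset
(this assumes the KelvinHelmholtz sample data sits in your yt test
data directory):

    import yt
    from yt.frontends.chombo.api import PlutoDataset

    ds = yt.load("KelvinHelmholtz/data.0004.hdf5")
    assert isinstance(ds, PlutoDataset)
    print(ds.field_list)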
diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -49,9 +49,9 @@
     OffAxisProjectionPlot
 
 from .profile_plotter import \
-     ProfilePlot, \
-     PhasePlot
-    
+    ProfilePlot, \
+    PhasePlot
+
 from .base_plot_types import \
     get_multi_plot
 

diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -121,7 +121,7 @@
         return defaultdict.__init__(self, default_factory)
 
 class ImagePlotContainer(object):
-    """A countainer for plots with colorbars.
+    """A container for plots with colorbars.
 
     """
     _plot_type = None
@@ -472,7 +472,6 @@
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.
-        function name.
 
         If yt is being run from within an IPython session, and it is able to
         determine this, this function will send any existing plots to the

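The show() docstring trimmed above (and again in profile_plotter.py
below) describes the notebook path: inside an IPython notebook session
the plot renders inline. A hedged usage sketch (the dataset path is an
assumption):

    import yt

    ds = yt.load("KelvinHelmholtz/data.0004.hdf5")
    slc = yt.SlicePlot(ds, 'z', 'density')
    slc.show()  # displays inline when run inside an IPython notebook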
diff -r dc60658345cd68e6e29218b52b0566053c62b144 -r d596ad6eafb7a3f32ed448f66aaa633aae216520 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -184,7 +184,7 @@
     ...                                  plot_specs=plot_specs)
     >>> plot.save()
 
-    Use plot_line_property to change line properties of one or all profiles.
+    Use set_line_property to change line properties of one or all profiles.
     
     """
     x_log = None
@@ -256,7 +256,6 @@
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.
-        function name.
 
         If yt is being run from within an IPython session, and it is able to
         determine this, this function will send any existing plots to the

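For reference, set_line_property (the method the corrected docstring
now names) changes matplotlib line properties on one or all profiles.
A short usage sketch; the dataset and field names here are
assumptions, not part of this commit:

    import yt
    from yt.visualization.profile_plotter import ProfilePlot

    ds = yt.load("KelvinHelmholtz/data.0004.hdf5")
    ad = ds.all_data()
    plot = ProfilePlot(ad, "density", ["velocity_magnitude"])
    # dash every profile line; pass index=0 to change only the first
    plot.set_line_property("linestyle", "--")
    plot.save()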
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because you have the commit notification service enabled.

