[yt-svn] commit/yt: 17 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Jul 16 12:40:42 PDT 2015


17 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/b416c9046eb4/
Changeset:   b416c9046eb4
Branch:      yt
User:        RicardaBeckmann
Date:        2015-06-05 11:09:07+00:00
Summary:     updated .hgignore
Affected #:  14 files

diff -r 7f61f5a20c2cfa2e632054578b572bdf36f75c61 -r b416c9046eb4860f7552a7dd76ce437c8138e034 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -63,3 +63,6 @@
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
 dist
+*~
+*#
+#*

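The three new patterns are the usual glob idioms for editor backup and
autosave files -- the same kind of file (particle_fields.py~,
data_structures.py~) that this changeset also adds below.  A minimal check
of what they match, assuming the file uses the usual "syntax: glob" mode
(a standalone sketch, not code from the changeset):

    import fnmatch

    # Hypothetical filenames: '*~' covers backup files, '#*' and '*#'
    # cover Emacs-style autosave/lock files.
    assert fnmatch.fnmatch("particle_fields.py~", "*~")
    assert fnmatch.fnmatch("#data_structures.py#", "#*")
    assert fnmatch.fnmatch("#data_structures.py#", "*#")
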
diff -r 7f61f5a20c2cfa2e632054578b572bdf36f75c61 -r b416c9046eb4860f7552a7dd76ce437c8138e034 yt/fields/particle_fields.py~
--- /dev/null
+++ b/yt/fields/particle_fields.py~
@@ -0,0 +1,576 @@
+"""
+These are common particle fields.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import *
+from yt.units.yt_array import YTArray
+from yt.fields.derived_field import \
+    ValidateParameter, \
+    ValidateSpatial
+from yt.utilities.physical_constants import \
+    mass_hydrogen_cgs, \
+    mass_sun_cgs, \
+    mh
+
+from yt.units.yt_array import \
+    uconcatenate
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi, \
+    periodic_dist, euclidean_dist
+
+from .vector_operations import \
+     create_magnitude_field
+    
+def _field_concat(fname):
+    def _AllFields(field, data):
+        v = []
+        for ptype in data.ds.particle_types:
+            data.ds._last_freq = (ptype, None)
+            if ptype == "all" or \
+                ptype in data.ds.known_filters:
+                  continue
+            v.append(data[ptype, fname].copy())
+        rv = uconcatenate(v, axis=0)
+        return rv
+    return _AllFields
+
+def _field_concat_slice(fname, axi):
+    def _AllFields(field, data):
+        v = []
+        for ptype in data.ds.particle_types:
+            data.ds._last_freq = (ptype, None)
+            if ptype == "all" or \
+                ptype in data.ds.known_filters:
+                  continue
+            v.append(data[ptype, fname][:,axi])
+        rv = uconcatenate(v, axis=0)
+        return rv
+    return _AllFields
+
+def particle_deposition_functions(ptype, coord_name, mass_name, registry):
+    orig = set(registry.keys())
+    ptype_dn = ptype.replace("_","\/").title()
+    def particle_count(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, method = "count")
+        d = data.ds.arr(d, input_units = "cm**-3")
+        return data.apply_units(d, field.units)
+
+    registry.add_field(("deposit", "%s_count" % ptype),
+             function = particle_count,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Count}" % ptype_dn)
+
+    def particle_mass(field, data):
+        pos = data[ptype, coord_name]
+        pmass = data[ptype, mass_name].in_units(field.units)
+        d = data.deposit(pos, [pmass], method = "sum")
+        return data.apply_units(d, field.units)
+
+    registry.add_field(("deposit", "%s_mass" % ptype),
+             function = particle_mass,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Mass}" % ptype_dn,
+             units = "g")
+             
+    def particle_density(field, data):
+        pos = data[ptype, coord_name]
+        mass = data[ptype, mass_name]
+        pos.convert_to_units("code_length")
+        mass.convert_to_units("code_mass")
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d = data.ds.arr(d, "code_mass")
+        d /= data["index", "cell_volume"]
+        return d
+
+    registry.add_field(("deposit", "%s_density" % ptype),
+             function = particle_density,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Density}" % ptype_dn,
+             units = "g/cm**3")
+
+    def particle_cic(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
+        d = data.apply_units(d, data[ptype, mass_name].units)
+        d /= data["index", "cell_volume"]
+        return d
+
+    registry.add_field(("deposit", "%s_cic" % ptype),
+             function = particle_cic,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s CIC Density}" % ptype_dn,
+             units = "g/cm**3")
+
+    def _get_density_weighted_deposit_field(fname, units, method):
+        def _deposit_field(field, data):
+            """
+            Create a grid field for particle quantities weighted by particle
+            mass, using cloud-in-cell deposit.
+            """
+            pos = data[ptype, "particle_position"]
+            # Get back into density
+            pden = data[ptype, 'particle_mass']
+            top = data.deposit(pos, [data[(ptype, fname)]*pden], method=method)
+            bottom = data.deposit(pos, [pden], method=method)
+            top[bottom == 0] = 0.0
+            bnz = bottom.nonzero()
+            top[bnz] /= bottom[bnz]
+            d = data.ds.arr(top, input_units=units)
+            return d
+        return _deposit_field
+
+    for ax in 'xyz':
+        for method, name in zip(("cic", "sum"), ("cic", "nn")):
+            function = _get_density_weighted_deposit_field(
+                "particle_velocity_%s" % ax, "cm/s", method)
+            registry.add_field(
+                ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)),
+                function=function, units="cm/s", take_log=False,
+                validators=[ValidateSpatial(0)])
+
+    # Now some translation functions.
+
+    def particle_ones(field, data):
+        v = np.ones(data[ptype, mass_name].shape, dtype="float64")
+        return data.apply_units(v, field.units)
+
+    registry.add_field((ptype, "particle_ones"),
+                       function = particle_ones,
+                       particle_type = True,
+                       units = "")
+
+    def particle_mesh_ids(field, data):
+        pos = data[ptype, coord_name]
+        ids = np.zeros(pos.shape[0], dtype="float64") - 1
+        # This is float64 in name only.  It will be properly cast inside the
+        # deposit operation.
+        #_ids = ids.view("float64")
+        data.deposit(pos, [ids], method = "mesh_id")
+        return data.apply_units(ids, "")
+    registry.add_field((ptype, "mesh_id"),
+            function = particle_mesh_ids,
+            validators = [ValidateSpatial()],
+            particle_type = True)
+
+    return list(set(registry.keys()).difference(orig))
+
+def particle_scalar_functions(ptype, coord_name, vel_name, registry):
+
+    # Now we have to set up the various velocity and coordinate things.  In the
+    # future, we'll actually invert this and use the 3-component items
+    # elsewhere, and stop using these.
+    
+    # Note that we pass in _ptype here so that it's defined inside the closure.
+
+    def _get_coord_funcs(axi, _ptype):
+        def _particle_velocity(field, data):
+            return data[_ptype, vel_name][:,axi]
+        def _particle_position(field, data):
+            return data[_ptype, coord_name][:,axi]
+        return _particle_velocity, _particle_position
+    for axi, ax in enumerate("xyz"):
+        v, p = _get_coord_funcs(axi, ptype)
+        registry.add_field((ptype, "particle_velocity_%s" % ax),
+            particle_type = True, function = v,
+            units = "code_velocity")
+        registry.add_field((ptype, "particle_position_%s" % ax),
+            particle_type = True, function = p,
+            units = "code_length")
+
+def particle_vector_functions(ptype, coord_names, vel_names, registry):
+
+    # This will column_stack a set of scalars to create vector fields.
+
+    def _get_vec_func(_ptype, names):
+        def particle_vectors(field, data):
+            v = [data[_ptype, name].in_units(field.units)
+                  for name in names]
+            c = np.column_stack(v)
+            return data.apply_units(c, field.units)
+        return particle_vectors
+    registry.add_field((ptype, "particle_position"),
+                       function=_get_vec_func(ptype, coord_names),
+                       units = "code_length",
+                       particle_type=True)
+    registry.add_field((ptype, "particle_velocity"),
+                       function=_get_vec_func(ptype, vel_names),
+                       units = "cm / s",
+                       particle_type=True)
+
+def standard_particle_fields(registry, ptype,
+                             spos = "particle_position_%s",
+                             svel = "particle_velocity_%s"):
+    # This function will set things up based on the scalar fields and standard
+    # yt field names.
+
+    def _particle_velocity_magnitude(field, data):
+        """ M{|v|} """
+        bulk_velocity = data.get_field_parameter("bulk_velocity")
+        if bulk_velocity is None:
+            bulk_velocity = np.zeros(3)
+        return np.sqrt((data[ptype, svel % 'x'] - bulk_velocity[0])**2
+                     + (data[ptype, svel % 'y'] - bulk_velocity[1])**2
+                     + (data[ptype, svel % 'z'] - bulk_velocity[2])**2 )
+    
+    registry.add_field((ptype, "particle_velocity_magnitude"),
+                  function=_particle_velocity_magnitude,
+                  particle_type=True,
+                  take_log=False,
+                  units="cm/s")
+
+    def _particle_specific_angular_momentum(field, data):
+        """
+        Calculate the specific angular momentum of a particle.  Returns a
+        vector for each particle.
+        """
+        if data.has_field_parameter("bulk_velocity"):
+            bv = data.get_field_parameter("bulk_velocity")
+        else: bv = np.zeros(3, dtype=np.float64)
+        xv = data[ptype, svel % 'x'] - bv[0]
+        yv = data[ptype, svel % 'y'] - bv[1]
+        zv = data[ptype, svel % 'z'] - bv[2]
+        center = data.get_field_parameter('center')
+        coords = YTArray([data[ptype, spos % 'x'],
+                           data[ptype, spos % 'y'],
+                           data[ptype, spos % 'z']], dtype=np.float64)
+        new_shape = tuple([3] + [1]*(len(coords.shape)-1))
+        r_vec = coords - np.reshape(center,new_shape)
+        v_vec = YTArray([xv,yv,zv], dtype=np.float64)
+        return np.cross(r_vec, v_vec, axis=0)
+
+    registry.add_field((ptype, "particle_specific_angular_momentum"),
+              function=_particle_specific_angular_momentum,
+              particle_type=True,
+              units="cm**2/s",
+              validators=[ValidateParameter("center")])
+
+    def _particle_specific_angular_momentum_x(field, data):
+        if data.has_field_parameter("bulk_velocity"):
+            bv = data.get_field_parameter("bulk_velocity")
+        else: bv = np.zeros(3, dtype=np.float64)
+        center = data.get_field_parameter('center')
+        y = data[ptype, spos % "y"] - center[1]
+        z = data[ptype, spos % "z"] - center[2]
+        yv = data[ptype, svel % "y"] - bv[1]
+        zv = data[ptype, svel % "z"] - bv[2]
+        return yv*z - zv*y
+
+    registry.add_field((ptype, "particle_specific_angular_momentum_x"),
+              function=_particle_specific_angular_momentum_x,
+              particle_type=True,
+              units="cm**2/s",
+              validators=[ValidateParameter("center")])
+
+    def _particle_specific_angular_momentum_y(field, data):
+        if data.has_field_parameter("bulk_velocity"):
+            bv = data.get_field_parameter("bulk_velocity")
+        else: bv = np.zeros(3, dtype=np.float64)
+        center = data.get_field_parameter('center')
+        x = data[ptype, spos % "x"] - center[0]
+        z = data[ptype, spos % "z"] - center[2]
+        xv = data[ptype, svel % "x"] - bv[0]
+        zv = data[ptype, svel % "z"] - bv[2]
+        return -(xv*z - zv*x)
+
+    registry.add_field((ptype, "particle_specific_angular_momentum_y"),
+              function=_particle_specific_angular_momentum_y,
+              particle_type=True,
+              units="cm**2/s",
+              validators=[ValidateParameter("center")])
+
+    def _particle_specific_angular_momentum_z(field, data):
+        if data.has_field_parameter("bulk_velocity"):
+            bv = data.get_field_parameter("bulk_velocity")
+        else: bv = np.zeros(3, dtype=np.float64)
+        center = data.get_field_parameter('center')
+        x = data[ptype, spos % "x"] - center[0]
+        y = data[ptype, spos % "y"] - center[1]
+        xv = data[ptype, svel % "x"] - bv[0]
+        yv = data[ptype, svel % "y"] - bv[1]
+        return xv*y - yv*x
+
+    registry.add_field((ptype, "particle_specific_angular_momentum_z"),
+              function=_particle_specific_angular_momentum_z,
+              particle_type=True,
+              units="cm**2/s",
+              validators=[ValidateParameter("center")])
+
+    create_magnitude_field(registry, "particle_specific_angular_momentum",
+                           "cm**2/s", ftype=ptype, particle_type=True)
+    
+    def _particle_angular_momentum_x(field, data):
+        return data[ptype, "particle_mass"] * \
+               data[ptype, "particle_specific_angular_momentum_x"]
+    registry.add_field((ptype, "particle_angular_momentum_x"),
+             function=_particle_angular_momentum_x,
+             units="g*cm**2/s", particle_type=True,
+             validators=[ValidateParameter('center')])
+
+    def _particle_angular_momentum_y(field, data):
+        return data[ptype, "particle_mass"] * \
+               data[ptype, "particle_specific_angular_momentum_y"]
+    registry.add_field((ptype, "particle_angular_momentum_y"),
+             function=_particle_angular_momentum_y,
+             units="g*cm**2/s", particle_type=True,
+             validators=[ValidateParameter('center')])
+
+    def _particle_angular_momentum_z(field, data):
+        return data[ptype, "particle_mass"] * \
+               data[ptype, "particle_specific_angular_momentum_z"]
+    registry.add_field((ptype, "particle_angular_momentum_z"),
+             function=_particle_angular_momentum_z,
+             units="g*cm**2/s", particle_type=True,
+             validators=[ValidateParameter('center')])
+
+    def _particle_angular_momentum(field, data):
+        return data[ptype, "particle_mass"] \
+            * data[ptype, "particle_specific_angular_momentum"]
+    registry.add_field((ptype, "particle_angular_momentum"),
+              function=_particle_angular_momentum,
+              particle_type=True,
+              units="g*cm**2/s",
+              validators=[ValidateParameter("center")])
+
+    create_magnitude_field(registry, "particle_angular_momentum",
+                           "g*cm**2/s", ftype=ptype, particle_type=True)
+    
+    from .field_functions import \
+        get_radius
+
+    def _particle_radius(field, data):
+        return get_radius(data, "particle_position_")
+    registry.add_field((ptype, "particle_radius"),
+              function=_particle_radius,
+              validators=[ValidateParameter("center")],
+              units="cm", particle_type = True,
+              display_name = "Particle Radius")
+
+    def _particle_spherical_position_radius(field, data):
+        """
+        Radial component of the particles' position vectors in spherical
+        coordinates, based on the provided field parameters for 'normal'
+        and 'center'.
+        """
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        sphr = get_sph_r_component(pos, theta, phi, normal)
+        return sphr
+
+    registry.add_field((ptype, "particle_spherical_position_radius"),
+              function=_particle_spherical_position_radius,
+              particle_type=True, units="cm",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _particle_spherical_position_theta(field, data):
+        """
+        Theta component of the particles' position vectors in spherical
+        coordinates, based on the provided field parameters for 'normal'
+        and 'center'.
+        """
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        spht = get_sph_theta_component(pos, theta, phi, normal)
+        return spht
+
+    registry.add_field((ptype, "particle_spherical_position_theta"),
+              function=_particle_spherical_position_theta,
+              particle_type=True, units="cm",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _particle_spherical_position_phi(field, data):
+        """
+        Phi component of the particles' position vectors in spherical
+        coordinates, based on the provided field parameters for 'normal'
+        and 'center'.
+        """
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        sphp = get_sph_phi_component(pos, phi, normal)
+        return sphp
+
+    registry.add_field((ptype, "particle_spherical_position_phi"),
+              function=_particle_spherical_position_phi,
+              particle_type=True, units="cm",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _particle_spherical_velocity_radius(field, data):
+        """
+        Radial component of the particles' velocity vectors in spherical
+        coordinates, based on the provided field parameters for 'normal',
+        'center', and 'bulk_velocity'.
+        """
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        vel = svel
+        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        vel = vel - np.reshape(bv, (3, 1))
+        sphr = get_sph_r_component(vel, theta, phi, normal)
+        return sphr
+
+    registry.add_field((ptype, "particle_spherical_velocity_radius"),
+              function=_particle_spherical_velocity_radius,
+              particle_type=True, units="cm/s",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    # This is simply aliased to "particle_spherical_velocity_radius"
+    # for ease of use.
+    registry.add_field((ptype, "particle_radial_velocity"),
+              function=_particle_spherical_velocity_radius,
+              particle_type=True, units="cm/s",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _particle_spherical_velocity_theta(field, data):
+        """
+        Theta component of the particles' velocity vectors in spherical
+        coordinates, based on the provided field parameters for 'normal',
+        'center', and 'bulk_velocity'.
+        """
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        vel = svel
+        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        vel = vel - np.reshape(bv, (3, 1))
+        spht = get_sph_theta_component(vel, theta, phi, normal)
+        return spht
+
+    registry.add_field((ptype, "particle_spherical_velocity_theta"),
+              function=_particle_spherical_velocity_theta,
+              particle_type=True, units="cm/s",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _particle_spherical_velocity_phi(field, data):
+        """
+        Phi component of the particles' velocity vectors in spherical
+        coordinates, based on the provided field parameters for 'normal',
+        'center', and 'bulk_velocity'.
+        """
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        vel = vel - np.reshape(bv, (3, 1))
+        sphp = get_sph_phi_component(vel, phi, normal)
+        return sphp
+
+    registry.add_field((ptype, "particle_spherical_velocity_phi"),
+              function=_particle_spherical_velocity_phi,
+              particle_type=True, units="cm/s",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+def add_particle_average(registry, ptype, field_name, 
+                         weight = "particle_mass",
+                         density = True):
+    field_units = registry[ptype, field_name].units
+    def _pfunc_avg(field, data):
+        pos = data[ptype, "particle_position"]
+        f = data[ptype, field_name]
+        wf = data[ptype, weight]
+        f *= wf
+        v = data.deposit(pos, [f], method = "sum")
+        w = data.deposit(pos, [wf], method = "sum")
+        v /= w
+        if density: v /= data["index", "cell_volume"]
+        v[np.isnan(v)] = 0.0
+        return v
+    fn = ("deposit", "%s_avg_%s" % (ptype, field_name))
+    registry.add_field(fn, function=_pfunc_avg,
+                       validators = [ValidateSpatial(0)],
+                       particle_type = False,
+                       units = field_units)
+    return fn
+
+def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
+        smoothing_length_name, density_name, smoothed_field, registry,
+        nneighbors = None):
+    field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
+    field_units = registry[ptype, smoothed_field].units
+    def _vol_weight(field, data):
+        pos = data[ptype, coord_name].in_units("code_length")
+        mass = data[ptype, mass_name].in_cgs()
+        dens = data[ptype, density_name].in_cgs()
+        quan = data[ptype, smoothed_field].in_units(field_units)
+        if smoothing_length_name is None:
+            hsml = np.zeros(quan.shape, dtype='float64') - 1
+            hsml = data.apply_units(hsml, "code_length")
+        else:
+            hsml = data[ptype, smoothing_length_name].in_units("code_length")
+        kwargs = {}
+        if nneighbors:
+            kwargs['nneighbors'] = nneighbors
+        rv = data.smooth(pos, [mass, hsml, dens, quan],
+                         method="volume_weighted",
+                         create_octree = True)[0]
+        rv[np.isnan(rv)] = 0.0
+        # Now some quick unit conversions.
+        rv = data.apply_units(rv, field_units)
+        return rv
+    registry.add_field(field_name, function = _vol_weight,
+                       validators = [ValidateSpatial(0)],
+                       units = field_units)
+    return [field_name]
+

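The _get_density_weighted_deposit_field closure above boils down to a
mass-weighted average per cell: deposit quantity*mass and mass separately,
then divide wherever any mass was deposited.  A standalone NumPy sketch of
that arithmetic (the particle values and cell indices are illustrative;
yt's data.deposit performs the real CIC/NGP binning):

    import numpy as np

    # Illustrative particle data: a quantity (e.g. one velocity component),
    # particle masses, and the flat index of the cell each particle lands in.
    quantity = np.array([1.0, 2.0, 3.0, 4.0])
    mass     = np.array([0.5, 0.5, 1.0, 2.0])
    cell     = np.array([0, 0, 1, 3])
    n_cells  = 4

    top = np.zeros(n_cells)      # sum of quantity * mass per cell
    bottom = np.zeros(n_cells)   # sum of mass per cell
    np.add.at(top, cell, quantity * mass)
    np.add.at(bottom, cell, mass)

    # Divide only where mass was deposited; empty cells stay at zero,
    # mirroring top[bottom == 0] = 0 followed by top[bnz] /= bottom[bnz].
    avg = np.zeros(n_cells)
    nz = bottom.nonzero()
    avg[nz] = top[nz] / bottom[nz]
    # avg[0] == (1.0*0.5 + 2.0*0.5) / (0.5 + 0.5) == 1.5
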
diff -r 7f61f5a20c2cfa2e632054578b572bdf36f75c61 -r b416c9046eb4860f7552a7dd76ce437c8138e034 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,13 +491,21 @@
         """
         Generates the conversion to various physical _units based on the parameter file
         """
-        # Note that unit_l *already* converts to proper!
-        # Also note that unit_l must be multiplied by the boxlen parameter to
-        # ensure we are correctly set up for the current domain.
-        length_unit = self.parameters['unit_l']
-        boxlen = self.parameters['boxlen']
-        density_unit = self.parameters['unit_d']
-        mass_unit = density_unit * (length_unit * boxlen)**3
+        # Please note that for all units given in the info file, the boxlen
+        # still needs to be folded in, as shown below!
+
+        length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
+        rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
+
+        # In the mass unit, the factors of boxlen cancel back out, so this
+        # is equivalent to unit_d * unit_l**3.
+
+        mass_unit = rho_u * length_unit**3
+
+        # Cosmological runs are done in lookback conformal time.
+        # To convert to proper time, the time unit is calculated from
+        # the expansion factor. This is not yet done here!
+
         time_unit = self.parameters['unit_t']
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))

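A quick numerical check of the comments in the hunk above: folding boxlen
into unit_l while dividing it out of unit_d leaves the mass unit equal to
unit_d * unit_l**3, so the boxlen factors do cancel.  The values below are
illustrative only; a real run reads unit_l, unit_d, unit_t and boxlen from
the info file.

    import numpy as np

    unit_l, unit_d, unit_t, boxlen = 3.08e21, 1.66e-24, 3.15e13, 2.0

    length_unit = unit_l * boxlen        # boxlen folded into the length unit
    rho_u = unit_d / boxlen**3           # ...and divided out of the density unit
    mass_unit = rho_u * length_unit**3   # the boxlen**3 factors cancel

    assert np.isclose(mass_unit, unit_d * unit_l**3)

    magnetic_unit = np.sqrt(4 * np.pi * mass_unit / (unit_t**2 * length_unit))
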
diff -r 7f61f5a20c2cfa2e632054578b572bdf36f75c61 -r b416c9046eb4860f7552a7dd76ce437c8138e034 yt/frontends/ramses/data_structures.py.orig
--- /dev/null
+++ b/yt/frontends/ramses/data_structures.py.orig
@@ -0,0 +1,596 @@
+"""
+RAMSES-specific data structures
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+import stat
+import weakref
+import cStringIO
+<<<<<<< local
+import scipy.integrate
+
+=======
+>>>>>>> other
+from yt.funcs import *
+from yt.geometry.oct_geometry_handler import \
+    OctreeIndex
+from yt.geometry.geometry_handler import \
+    Index, YTDataChunk
+from yt.data_objects.static_output import \
+    Dataset
+from yt.data_objects.octree_subset import \
+    OctreeSubset
+
+from .definitions import ramses_header, field_aliases
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import mp, kb
+from .fields import \
+    RAMSESFieldInfo, _X
+import yt.utilities.fortran_utils as fpu
+from yt.geometry.oct_container import \
+    RAMSESOctreeContainer
+from yt.fields.particle_fields import \
+    standard_particle_fields
+
+class RAMSESDomainFile(object):
+    _last_mask = None
+    _last_selector_id = None
+
+    def __init__(self, ds, domain_id):
+        self.ds = ds
+        self.domain_id = domain_id
+        self.nvar = 0 # Set this later!
+        num = os.path.basename(ds.parameter_filename).split("."
+                )[0].split("_")[1]
+        basename = "%s/%%s_%s.out%05i" % (
+            os.path.abspath(
+              os.path.dirname(ds.parameter_filename)),
+            num, domain_id)
+        for t in ['grav', 'hydro', 'part', 'amr']:
+            setattr(self, "%s_fn" % t, basename % t)
+        self._read_amr_header()
+        self._read_hydro_header()
+        self._read_particle_header()
+        self._read_amr()
+
+    _hydro_offset = None
+    _level_count = None
+
+    def __repr__(self):
+        return "RAMSESDomainFile: %i" % self.domain_id
+
+    def _is_hydro(self):
+        '''
+        Does the output include hydro?
+        '''
+        return os.path.exists(self.hydro_fn)
+
+    @property
+    def level_count(self):
+        if self._level_count is not None: return self._level_count
+        self.hydro_offset
+        return self._level_count
+
+    @property
+    def hydro_offset(self):
+        if self._hydro_offset is not None: return self._hydro_offset
+        # We now have to open the file and calculate it
+        f = open(self.hydro_fn, "rb")
+        fpu.skip(f, 6)
+        # It goes: level, CPU, 8-variable
+        min_level = self.ds.min_level
+        n_levels = self.amr_header['nlevelmax'] - min_level
+        hydro_offset = np.zeros(n_levels, dtype='int64')
+        hydro_offset -= 1
+        level_count = np.zeros(n_levels, dtype='int64')
+        skipped = []
+        for level in range(self.amr_header['nlevelmax']):
+            for cpu in range(self.amr_header['nboundary'] +
+                             self.amr_header['ncpu']):
+                header = ( ('file_ilevel', 1, 'I'),
+                           ('file_ncache', 1, 'I') )
+                try:
+                    hvals = fpu.read_attrs(f, header, "=")
+                except AssertionError:
+                    print "You are running with the wrong number of fields."
+                    print "If you specified these in the load command, check the array length."
+                    print "In this file there are %s hydro fields." % skipped
+                    #print "The last set of field sizes was: %s" % skipped
+                    raise
+                if hvals['file_ncache'] == 0: continue
+                assert(hvals['file_ilevel'] == level+1)
+                if cpu + 1 == self.domain_id and level >= min_level:
+                    hydro_offset[level - min_level] = f.tell()
+                    level_count[level - min_level] = hvals['file_ncache']
+                skipped = fpu.skip(f, 8 * self.nvar)
+        self._hydro_offset = hydro_offset
+        self._level_count = level_count
+        return self._hydro_offset
+
+    def _read_hydro_header(self):
+        # If no hydro file is found, return
+        if not self._is_hydro():
+            return
+        if self.nvar > 0: return self.nvar
+        # Read the number of hydro  variables
+        f = open(self.hydro_fn, "rb")
+        fpu.skip(f, 1)
+        self.nvar = fpu.read_vector(f, "i")[0]
+
+    def _read_particle_header(self):
+        if not os.path.exists(self.part_fn):
+            self.local_particle_count = 0
+            self.particle_field_offsets = {}
+            return
+        f = open(self.part_fn, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
+        hvals = {}
+        attrs = ( ('ncpu', 1, 'I'),
+                  ('ndim', 1, 'I'),
+                  ('npart', 1, 'I') )
+        hvals.update(fpu.read_attrs(f, attrs))
+        fpu.read_vector(f, 'I')
+
+        attrs = ( ('nstar_tot', 1, 'I'),
+                  ('mstar_tot', 1, 'd'),
+                  ('mstar_lost', 1, 'd'),
+                  ('nsink', 1, 'I') )
+        hvals.update(fpu.read_attrs(f, attrs))
+        self.particle_header = hvals
+        self.local_particle_count = hvals['npart']
+        particle_fields = [
+                ("particle_position_x", "d"),
+                ("particle_position_y", "d"),
+                ("particle_position_z", "d"),
+                ("particle_velocity_x", "d"),
+                ("particle_velocity_y", "d"),
+                ("particle_velocity_z", "d"),
+                ("particle_mass", "d"),
+                ("particle_identifier", "I"),
+                ("particle_refinement_level", "I")]
+        if hvals["nstar_tot"] > 0:
+            particle_fields += [("particle_age", "d"),
+                                ("particle_metallicity", "d")]
+
+        field_offsets = {}
+        _pfields = {}
+        for field, vtype in particle_fields:
+            if f.tell() >= flen: break
+            field_offsets["io", field] = f.tell()
+            _pfields["io", field] = vtype
+            fpu.skip(f, 1)
+        self.particle_field_offsets = field_offsets
+        self.particle_field_types = _pfields
+        self.particle_types = self.particle_types_raw = ("io",)
+
+    def _read_amr_header(self):
+        hvals = {}
+        f = open(self.amr_fn, "rb")
+        for header in ramses_header(hvals):
+            hvals.update(fpu.read_attrs(f, header))
+        # That's the header, now we skip a few.
+        hvals['numbl'] = np.array(hvals['numbl']).reshape(
+            (hvals['nlevelmax'], hvals['ncpu']))
+        fpu.skip(f)
+        if hvals['nboundary'] > 0:
+            fpu.skip(f, 2)
+            self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
+        else:
+            self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
+        free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
+        ordering = fpu.read_vector(f, 'c')
+        fpu.skip(f, 4)
+        # Now we're at the tree itself
+        # Now we iterate over each level and each CPU.
+        self.amr_header = hvals
+        self.amr_offset = f.tell()
+        self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
+        self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
+
+    def _read_amr(self):
+        """Open the oct file, read in octs level-by-level.
+           For each oct, only the position, index, level and domain 
+           are needed - its position in the octree is found automatically.
+           The most important part is finding all the information to feed
+           to oct_handler.add.
+        """
+        self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
+                self.ds.domain_left_edge, self.ds.domain_right_edge)
+        root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
+        self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
+        fb = open(self.amr_fn, "rb")
+        fb.seek(self.amr_offset)
+        f = cStringIO.StringIO()
+        f.write(fb.read())
+        f.seek(0)
+        mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
+            self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
+        def _ng(c, l):
+            if c < self.amr_header['ncpu']:
+                ng = self.amr_header['numbl'][l, c]
+            else:
+                ng = self.ngridbound[c - self.amr_header['ncpu'] +
+                                self.amr_header['nboundary']*l]
+            return ng
+        min_level = self.ds.min_level
+        # yt max level is not the same as the RAMSES one.
+        # yt max level is the maximum number of additional refinement levels
+        # so for a unigrid run with no refinement, it would be 0.
+        # So we initially assume that.
+        max_level = 0
+        nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
+        for level in range(self.amr_header['nlevelmax']):
+            # Easier if do this 1-indexed
+            for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
+                #ng is the number of octs on this level on this domain
+                ng = _ng(cpu, level)
+                if ng == 0: continue
+                ind = fpu.read_vector(f, "I").astype("int64")
+                fpu.skip(f, 2)
+                pos = np.empty((ng, 3), dtype='float64')
+                pos[:,0] = fpu.read_vector(f, "d") - nx
+                pos[:,1] = fpu.read_vector(f, "d") - ny
+                pos[:,2] = fpu.read_vector(f, "d") - nz
+                #pos *= self.ds.domain_width
+                #pos += self.dataset.domain_left_edge
+                fpu.skip(f, 31)
+                #parents = fpu.read_vector(f, "I")
+                #fpu.skip(f, 6)
+                #children = np.empty((ng, 8), dtype='int64')
+                #for i in range(8):
+                #    children[:,i] = fpu.read_vector(f, "I")
+                #cpu_map = np.empty((ng, 8), dtype="int64")
+                #for i in range(8):
+                #    cpu_map[:,i] = fpu.read_vector(f, "I")
+                #rmap = np.empty((ng, 8), dtype="int64")
+                #for i in range(8):
+                #    rmap[:,i] = fpu.read_vector(f, "I")
+                # We don't want duplicate grids.
+                # Note that we're adding *grids*, not individual cells.
+                if level >= min_level:
+                    assert(pos.shape[0] == ng)
+                    n = self.oct_handler.add(cpu + 1, level - min_level, pos,
+                                count_boundary = 1)
+                    self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
+                    if n > 0: max_level = max(level - min_level, max_level)
+        self.max_level = max_level
+        self.oct_handler.finalize()
+
+    def _error_check(self, cpu, level, pos, n, ng, nn):
+        # NOTE: We have the second conditional here because internally, it will
+        # not add any octs in that case.
+        if n == ng or cpu + 1 > self.oct_handler.num_domains:
+            return
+        # This is where we now check for issues with creating the new octs, and
+        # we attempt to determine what precisely is going wrong.
+        # These are all print statements.
+        print "We have detected an error with the construction of the Octree."
+        print "  The number of Octs to be added :  %s" % ng
+        print "  The number of Octs added       :  %s" % n
+        print "  Level                          :  %s" % level
+        print "  CPU Number (0-indexed)         :  %s" % cpu
+        for i, ax in enumerate('xyz'):
+            print "  extent [%s]                     :  %s %s" % \
+            (ax, pos[:,i].min(), pos[:,i].max())
+        print "  domain left                    :  %s" % \
+            (self.ds.domain_left_edge,)
+        print "  domain right                   :  %s" % \
+            (self.ds.domain_right_edge,)
+        print "  offset applied                 :  %s %s %s" % \
+            (nn[0], nn[1], nn[2])
+        print "AMR Header:"
+        for key in sorted(self.amr_header):
+            print "   %-30s: %s" % (key, self.amr_header[key])
+        raise RuntimeError
+
+    def included(self, selector):
+        if getattr(selector, "domain_id", None) is not None:
+            return selector.domain_id == self.domain_id
+        domain_ids = self.oct_handler.domain_identify(selector)
+        return self.domain_id in domain_ids
+
+class RAMSESDomainSubset(OctreeSubset):
+
+    _domain_offset = 1
+
+    def fill(self, content, fields, selector):
+        # Here we get a copy of the file, which we skip through and read the
+        # bits we want.
+        oct_handler = self.oct_handler
+        all_fields = self.domain.ds.index.fluid_field_list
+        fields = [f for ft, f in fields]
+        tr = {}
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
+        for field in fields:
+            tr[field] = np.zeros(cell_count, 'float64')
+        for level, offset in enumerate(self.domain.hydro_offset):
+            if offset == -1: continue
+            content.seek(offset)
+            nc = self.domain.level_count[level]
+            temp = {}
+            for field in all_fields:
+                temp[field] = np.empty((nc, 8), dtype="float64")
+            for i in range(8):
+                for field in all_fields:
+                    if field not in fields:
+                        fpu.skip(content)
+                    else:
+                        temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
+            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
+        return tr
+
+class RAMSESIndex(OctreeIndex):
+
+    def __init__(self, ds, dataset_type='ramses'):
+        self.fluid_field_list = ds._fields_in_file
+        self.dataset_type = dataset_type
+        self.dataset = weakref.proxy(ds)
+        self.index_filename = self.dataset.parameter_filename
+        self.directory = os.path.dirname(self.index_filename)
+        self.max_level = None
+
+        self.float_type = np.float64
+        super(RAMSESIndex, self).__init__(ds, dataset_type)
+
+    def _initialize_oct_handler(self):
+        self.domains = [RAMSESDomainFile(self.dataset, i + 1)
+                        for i in range(self.dataset['ncpu'])]
+        total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
+                         for dom in self.domains)
+        self.max_level = max(dom.max_level for dom in self.domains)
+        self.num_grids = total_octs
+
+    def _detect_output_fields(self):
+        # Do we want to attempt to figure out what the fields are in the file?
+        dsl = set([])
+        if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
+            self._setup_auto_fields()
+        for domain in self.domains:
+            dsl.update(set(domain.particle_field_offsets.keys()))
+        self.particle_field_list = list(dsl)
+        self.field_list = [("ramses", f) for f in self.fluid_field_list] \
+                        + self.particle_field_list
+
+    def _setup_auto_fields(self):
+        '''
+        If no fluid fields are set, the code tries to set up a fluids array by hand
+        '''
+        # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
+        # Find nvar
+        
+
+        # TODO: copy/pasted from DomainFile; needs refactoring!
+        num = os.path.basename(self.dataset.parameter_filename).split("."
+                )[0].split("_")[1]
+        testdomain = 1 # Just pick the first domain file to read
+        basename = "%s/%%s_%s.out%05i" % (
+            os.path.abspath(
+              os.path.dirname(self.dataset.parameter_filename)),
+            num, testdomain)
+        hydro_fn = basename % "hydro"
+        # Do we have a hydro file?
+        if not os.path.exists(hydro_fn):
+            self.fluid_field_list = []
+            return
+        # Read the number of hydro  variables
+        f = open(hydro_fn, "rb")
+        hydro_header = ( ('ncpu', 1, 'i'),
+                         ('nvar', 1, 'i'),
+                         ('ndim', 1, 'i'),
+                         ('nlevelmax', 1, 'i'),
+                         ('nboundary', 1, 'i'),
+                         ('gamma', 1, 'd')
+                         )
+        hvals = fpu.read_attrs(f, hydro_header)
+        self.ds.gamma = hvals['gamma']
+        nvar = hvals['nvar']
+        # OK, we got NVAR, now set up the arrays depending on what NVAR is
+        # Allow some wiggle room for users to add too many variables
+        if nvar < 5:
+            mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
+            raise ValueError
+        # Basic hydro runs
+        if nvar == 5:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "Pressure"]
+        if nvar > 5 and nvar < 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "Pressure", "Metallicity"]
+        # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+        if nvar == 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+                      "Pressure"]
+        if nvar > 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+                      "Pressure","Metallicity"]
+        while len(fields) < nvar:
+            fields.append("var"+str(len(fields)))
+        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
+        self.fluid_field_list = fields
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            domains = [dom for dom in self.domains if
+                       dom.included(dobj.selector)]
+            base_region = getattr(dobj, "base_region", dobj)
+            if len(domains) > 1:
+                mylog.debug("Identified %s intersecting domains", len(domains))
+            subsets = [RAMSESDomainSubset(base_region, domain, self.dataset)
+                       for domain in domains]
+            dobj._chunk_info = subsets
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, None)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield YTDataChunk(dobj, "spatial", [g], None)
+
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+class RAMSESDataset(Dataset):
+    _index_class = RAMSESIndex
+    _field_info_class = RAMSESFieldInfo
+    gamma = 1.4 # This will get replaced on hydro_fn open
+    
+    def __init__(self, filename, dataset_type='ramses',
+                 fields = None, storage_filename = None,
+                 units_override=None):
+        # Here we want to initiate a traceback, if the reader is not built.
+        if isinstance(fields, types.StringTypes):
+            fields = field_aliases[fields]
+        '''
+        fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
+                If set to None, will try a default set of fields
+        '''
+        self.fluid_types += ("ramses",)
+        self._fields_in_file = fields
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
+        self.storage_filename = storage_filename
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+
+    def _set_code_unit_attributes(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        # Note that unit_l *already* converts to proper!
+        # Also note that unit_l must be multiplied by the boxlen parameter to
+        # ensure we are correctly set up for the current domain.
+<<<<<<< local
+        length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
+        rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
+        # We're not multiplying by the boxlength here.
+        mass_unit = rho_u * length_unit**3
+        # Cosmological runs are done in lookback conformal time. 
+        #To convert to proper time, the time unit is calculated from 
+        # the expansion factor.
+=======
+        length_unit = self.parameters['unit_l']
+        boxlen = self.parameters['boxlen']
+        density_unit = self.parameters['unit_d']
+        mass_unit = density_unit * (length_unit * boxlen)**3
+>>>>>>> other
+        time_unit = self.parameters['unit_t']
+        magnetic_unit = np.sqrt(4*np.pi * mass_unit /
+                                (time_unit**2 * length_unit))
+        pressure_unit = density_unit * (length_unit / time_unit)**2
+        # TODO:
+        # Generalize the temperature field to account for ionization
+        # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
+        mean_molecular_weight_factor = _X**-1
+
+        self.density_unit = self.quan(density_unit, 'g/cm**3')
+        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        self.length_unit = self.quan(length_unit * boxlen, "cm")
+        self.mass_unit = self.quan(mass_unit, "g")
+        self.time_unit = self.quan(time_unit, "s")
+        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
+        self.temperature_unit = (self.velocity_unit**2 * mp *
+                                 mean_molecular_weight_factor / kb)
+        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
+
+    def _parse_parameter_file(self):
+        # hardcoded for now
+        # These should be explicitly obtained from the file, but for now that
+        # will wait until a reorganization of the source tree and better
+        # generalization.
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = 'ramses'
+        self.parameters["Time"] = 1. # default unit is 1...
+
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        # We now execute the same logic Oliver's code does
+        rheader = {}
+        f = open(self.parameter_filename)
+        def read_rhs(cast):
+            line = f.readline()
+            p, v = line.split("=")
+            rheader[p.strip()] = cast(v)
+        for i in range(6): read_rhs(int)
+        f.readline()
+        for i in range(11): read_rhs(float)
+        f.readline()
+        read_rhs(str)
+        # This next line deserves some comment.  We specify a min_level that
+        # corresponds to the minimum level in the RAMSES simulation.  RAMSES is
+        # one-indexed, but it also does refer to the *oct* dimensions -- so
+        # this means that a levelmin of 1 would have *1* oct in it.  So a
+        # levelmin of 2 would have 8 octs at the root mesh level.
+        self.min_level = rheader['levelmin'] - 1
+        # Now we read the hilbert indices
+        self.hilbert_indices = {}
+        if rheader['ordering type'] == "hilbert":
+            f.readline() # header
+            for n in range(rheader['ncpu']):
+                dom, mi, ma = f.readline().split()
+                self.hilbert_indices[int(dom)] = (float(mi), float(ma))
+        self.parameters.update(rheader)
+        self.current_time = self.parameters['time'] 
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_dimensions = np.ones(3, dtype='int32') * \
+                        2**(self.min_level+1)
+        self.domain_right_edge = np.ones(3, dtype='float64')
+        # This is likely not true, but it's not clear how to determine the boundary conditions
+        self.periodicity = (True, True, True)
+        # These conditions seem to always be true for non-cosmological datasets
+        if rheader["time"] > 0 and rheader["H0"] == 1 and rheader["aexp"] == 1:
+            self.cosmological_simulation = 0
+            self.current_redshift = 0
+            self.hubble_constant = 0
+            self.omega_matter = 0
+            self.omega_lambda = 0
+        else:
+            self.cosmological_simulation = 1
+            self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
+            self.omega_lambda = rheader["omega_l"]
+            self.omega_matter = rheader["omega_m"]
+            self.hubble_constant = rheader["H0"] / 100.0 # This is H100
+        self.max_level = rheader['levelmax'] - self.min_level - 1
+        f.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not os.path.basename(args[0]).startswith("info_"): return False
+        fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
+        return os.path.exists(fn)

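The levelmin bookkeeping in _parse_parameter_file above comes down to a few
lines: yt's min_level is the RAMSES levelmin minus one, the root mesh then
spans 2**(min_level+1) cells per axis and hence 8**min_level octs -- one
oct for levelmin = 1, eight for levelmin = 2, as the inline comment notes.
A sketch with a made-up levelmin (not code from the changeset):

    # Illustrative value; a real run reads levelmin from the info file header.
    levelmin = 3

    min_level = levelmin - 1                 # yt is zero-indexed here
    cells_per_axis = 2 ** (min_level + 1)    # domain_dimensions along each axis
    root_octs = 8 ** min_level               # an oct is a 2x2x2 block of cells

    assert (min_level, cells_per_axis, root_octs) == (2, 8, 64)
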
diff -r 7f61f5a20c2cfa2e632054578b572bdf36f75c61 -r b416c9046eb4860f7552a7dd76ce437c8138e034 yt/frontends/ramses/data_structures.py~
--- /dev/null
+++ b/yt/frontends/ramses/data_structures.py~
@@ -0,0 +1,563 @@
+"""
+RAMSES-specific data structures
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+import stat
+import weakref
+import cStringIO
+import scipy.integrate
+
+from yt.funcs import *
+from yt.geometry.oct_geometry_handler import \
+    OctreeIndex
+from yt.geometry.geometry_handler import \
+    Index, YTDataChunk
+from yt.data_objects.static_output import \
+    Dataset
+from yt.data_objects.octree_subset import \
+    OctreeSubset
+
+from .definitions import ramses_header, field_aliases
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from .fields import \
+    RAMSESFieldInfo
+import yt.utilities.fortran_utils as fpu
+from yt.geometry.oct_container import \
+    RAMSESOctreeContainer
+from yt.fields.particle_fields import \
+    standard_particle_fields
+
+class RAMSESDomainFile(object):
+    _last_mask = None
+    _last_selector_id = None
+
+    def __init__(self, ds, domain_id):
+        self.ds = ds
+        self.domain_id = domain_id
+        self.nvar = 0 # Set this later!
+        num = os.path.basename(ds.parameter_filename).split("."
+                )[0].split("_")[1]
+        basename = "%s/%%s_%s.out%05i" % (
+            os.path.abspath(
+              os.path.dirname(ds.parameter_filename)),
+            num, domain_id)
+        for t in ['grav', 'hydro', 'part', 'amr']:
+            setattr(self, "%s_fn" % t, basename % t)
+        self._read_amr_header()
+        self._read_hydro_header()
+        self._read_particle_header()
+        self._read_amr()
+
+    _hydro_offset = None
+    _level_count = None
+
+    def __repr__(self):
+        return "RAMSESDomainFile: %i" % self.domain_id
+
+    def _is_hydro(self):
+        '''
+        Does the output include hydro?
+        '''
+        return os.path.exists(self.hydro_fn)
+
+    @property
+    def level_count(self):
+        if self._level_count is not None: return self._level_count
+        self.hydro_offset
+        return self._level_count
+
+    @property
+    def hydro_offset(self):
+        if self._hydro_offset is not None: return self._hydro_offset
+        # We now have to open the file and calculate it
+        f = open(self.hydro_fn, "rb")
+        fpu.skip(f, 6)
+        # It goes: level, CPU, 8-variable
+        min_level = self.ds.min_level
+        n_levels = self.amr_header['nlevelmax'] - min_level
+        hydro_offset = np.zeros(n_levels, dtype='int64')
+        hydro_offset -= 1
+        level_count = np.zeros(n_levels, dtype='int64')
+        skipped = []
+        for level in range(self.amr_header['nlevelmax']):
+            for cpu in range(self.amr_header['nboundary'] +
+                             self.amr_header['ncpu']):
+                header = ( ('file_ilevel', 1, 'I'),
+                           ('file_ncache', 1, 'I') )
+                try:
+                    hvals = fpu.read_attrs(f, header, "=")
+                except AssertionError:
+                    print "You are running with the wrong number of fields."
+                    print "If you specified these in the load command, check the array length."
+                    print "In this file there are %s hydro fields." % skipped
+                    #print "The last set of field sizes was: %s" % skipped
+                    raise
+                if hvals['file_ncache'] == 0: continue
+                assert(hvals['file_ilevel'] == level+1)
+                if cpu + 1 == self.domain_id and level >= min_level:
+                    hydro_offset[level - min_level] = f.tell()
+                    level_count[level - min_level] = hvals['file_ncache']
+                skipped = fpu.skip(f, 8 * self.nvar)
+        self._hydro_offset = hydro_offset
+        self._level_count = level_count
+        return self._hydro_offset
+
+    def _read_hydro_header(self):
+        # If no hydro file is found, return
+        if not self._is_hydro():
+            return
+        if self.nvar > 0: return self.nvar
+        # Read the number of hydro variables
+        f = open(self.hydro_fn, "rb")
+        fpu.skip(f, 1)
+        self.nvar = fpu.read_vector(f, "i")[0]
+
+    def _read_particle_header(self):
+        if not os.path.exists(self.part_fn):
+            self.local_particle_count = 0
+            self.particle_field_offsets = {}
+            return
+        f = open(self.part_fn, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
+        hvals = {}
+        attrs = ( ('ncpu', 1, 'I'),
+                  ('ndim', 1, 'I'),
+                  ('npart', 1, 'I') )
+        hvals.update(fpu.read_attrs(f, attrs))
+        fpu.read_vector(f, 'I')
+
+        attrs = ( ('nstar_tot', 1, 'I'),
+                  ('mstar_tot', 1, 'd'),
+                  ('mstar_lost', 1, 'd'),
+                  ('nsink', 1, 'I') )
+        hvals.update(fpu.read_attrs(f, attrs))
+        self.particle_header = hvals
+        self.local_particle_count = hvals['npart']
+        particle_fields = [
+                ("particle_position_x", "d"),
+                ("particle_position_y", "d"),
+                ("particle_position_z", "d"),
+                ("particle_velocity_x", "d"),
+                ("particle_velocity_y", "d"),
+                ("particle_velocity_z", "d"),
+                ("particle_mass", "d"),
+                ("particle_identifier", "I"),
+                ("particle_refinement_level", "I")]
+        if hvals["nstar_tot"] > 0:
+            particle_fields += [("particle_age", "d"),
+                                ("particle_metallicity", "d")]
+        field_offsets = {}
+        _pfields = {}
+        for field, vtype in particle_fields:
+            if f.tell() >= flen: break
+            field_offsets["io", field] = f.tell()
+            _pfields["io", field] = vtype
+            fpu.skip(f, 1)
+        self.particle_field_offsets = field_offsets
+        self.particle_field_types = _pfields
+        self.particle_types = self.particle_types_raw = ("io",)
+
+    def _read_amr_header(self):
+        hvals = {}
+        f = open(self.amr_fn, "rb")
+        for header in ramses_header(hvals):
+            hvals.update(fpu.read_attrs(f, header))
+        # That's the header, now we skip a few.
+        hvals['numbl'] = np.array(hvals['numbl']).reshape(
+            (hvals['nlevelmax'], hvals['ncpu']))
+        fpu.skip(f)
+        if hvals['nboundary'] > 0:
+            fpu.skip(f, 2)
+            self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
+        else:
+            self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
+        free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
+        ordering = fpu.read_vector(f, 'c')
+        fpu.skip(f, 4)
+        # Now we're at the tree itself
+        # Now we iterate over each level and each CPU.
+        self.amr_header = hvals
+        self.amr_offset = f.tell()
+        self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
+        self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
+
+    def _read_amr(self):
+        """Open the oct file, read in octs level-by-level.
+           For each oct, only the position, index, level and domain 
+           are needed - its position in the octree is found automatically.
+           The most important is finding all the information to feed
+           oct_handler.add
+        """
+        self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
+                self.ds.domain_left_edge, self.ds.domain_right_edge)
+        root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
+        self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
+        fb = open(self.amr_fn, "rb")
+        fb.seek(self.amr_offset)
+        f = cStringIO.StringIO()
+        f.write(fb.read())
+        f.seek(0)
+        mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
+            self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
+        def _ng(c, l):
+            if c < self.amr_header['ncpu']:
+                ng = self.amr_header['numbl'][l, c]
+            else:
+                ng = self.ngridbound[c - self.amr_header['ncpu'] +
+                                self.amr_header['nboundary']*l]
+            return ng
+        min_level = self.ds.min_level
+        max_level = min_level
+        nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
+        for level in range(self.amr_header['nlevelmax']):
+            # Easier if do this 1-indexed
+            for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
+                #ng is the number of octs on this level on this domain
+                ng = _ng(cpu, level)
+                if ng == 0: continue
+                ind = fpu.read_vector(f, "I").astype("int64")
+                fpu.skip(f, 2)
+                pos = np.empty((ng, 3), dtype='float64')
+                pos[:,0] = fpu.read_vector(f, "d") - nx
+                pos[:,1] = fpu.read_vector(f, "d") - ny
+                pos[:,2] = fpu.read_vector(f, "d") - nz
+                #pos *= self.ds.domain_width
+                #pos += self.dataset.domain_left_edge
+                fpu.skip(f, 31)
+                #parents = fpu.read_vector(f, "I")
+                #fpu.skip(f, 6)
+                #children = np.empty((ng, 8), dtype='int64')
+                #for i in range(8):
+                #    children[:,i] = fpu.read_vector(f, "I")
+                #cpu_map = np.empty((ng, 8), dtype="int64")
+                #for i in range(8):
+                #    cpu_map[:,i] = fpu.read_vector(f, "I")
+                #rmap = np.empty((ng, 8), dtype="int64")
+                #for i in range(8):
+                #    rmap[:,i] = fpu.read_vector(f, "I")
+                # We don't want duplicate grids.
+                # Note that we're adding *grids*, not individual cells.
+                if level >= min_level:
+                    assert(pos.shape[0] == ng)
+                    n = self.oct_handler.add(cpu + 1, level - min_level, pos,
+                                count_boundary = 1)
+                    self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
+                    if n > 0: max_level = max(level - min_level, max_level)
+        self.max_level = max_level
+        self.oct_handler.finalize()
+
+    def _error_check(self, cpu, level, pos, n, ng, nn):
+        # NOTE: We have the second conditional here because internally, it will
+        # not add any octs in that case.
+        if n == ng or cpu + 1 > self.oct_handler.num_domains:
+            return
+        # This is where we now check for issues with creating the new octs, and
+        # we attempt to determine what precisely is going wrong.
+        # These are all print statements.
+        print "We have detected an error with the construction of the Octree."
+        print "  The number of Octs to be added :  %s" % ng
+        print "  The number of Octs added       :  %s" % n
+        print "  Level                          :  %s" % level
+        print "  CPU Number (0-indexed)         :  %s" % cpu
+        for i, ax in enumerate('xyz'):
+            print "  extent [%s]                     :  %s %s" % \
+            (ax, pos[:,i].min(), pos[:,i].max())
+        print "  domain left                    :  %s" % \
+            (self.ds.domain_left_edge,)
+        print "  domain right                   :  %s" % \
+            (self.ds.domain_right_edge,)
+        print "  offset applied                 :  %s %s %s" % \
+            (nn[0], nn[1], nn[2])
+        print "AMR Header:"
+        for key in sorted(self.amr_header):
+            print "   %-30s: %s" % (key, self.amr_header[key])
+        raise RuntimeError
+
+    def included(self, selector):
+        if getattr(selector, "domain_id", None) is not None:
+            return selector.domain_id == self.domain_id
+        domain_ids = self.oct_handler.domain_identify(selector)
+        return self.domain_id in domain_ids
+
+class RAMSESDomainSubset(OctreeSubset):
+
+    _domain_offset = 1
+
+    def fill(self, content, fields, selector):
+        # Here we get a copy of the file, which we skip through and read the
+        # bits we want.
+        oct_handler = self.oct_handler
+        all_fields = self.domain.ds.index.fluid_field_list
+        fields = [f for ft, f in fields]
+        tr = {}
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
+        for field in fields:
+            tr[field] = np.zeros(cell_count, 'float64')
+        for level, offset in enumerate(self.domain.hydro_offset):
+            if offset == -1: continue
+            content.seek(offset)
+            nc = self.domain.level_count[level]
+            temp = {}
+            for field in all_fields:
+                temp[field] = np.empty((nc, 8), dtype="float64")
+            for i in range(8):
+                for field in all_fields:
+                    if field not in fields:
+                        fpu.skip(content)
+                    else:
+                        temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
+            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
+        return tr
+
+class RAMSESIndex(OctreeIndex):
+
+    def __init__(self, ds, dataset_type='ramses'):
+        self._ds = ds # TODO: Figure out the class composition better!
+        self.fluid_field_list = ds._fields_in_file
+        self.dataset_type = dataset_type
+        self.dataset = weakref.proxy(ds)
+        self.index_filename = self.dataset.parameter_filename
+        self.directory = os.path.dirname(self.index_filename)
+        self.max_level = None
+
+        self.float_type = np.float64
+        super(RAMSESIndex, self).__init__(ds, dataset_type)
+
+    def _initialize_oct_handler(self):
+        self.domains = [RAMSESDomainFile(self.dataset, i + 1)
+                        for i in range(self.dataset['ncpu'])]
+        total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
+                         for dom in self.domains)
+        self.max_level = max(dom.max_level for dom in self.domains)
+        self.num_grids = total_octs
+
+    def _detect_output_fields(self):
+        # Do we want to attempt to figure out what the fields are in the file?
+        dsl = set([])
+        if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
+            self._setup_auto_fields()
+        for domain in self.domains:
+            dsl.update(set(domain.particle_field_offsets.keys()))
+        self.particle_field_list = list(dsl)
+        self.field_list = [("ramses", f) for f in self.fluid_field_list] \
+                        + self.particle_field_list
+
+    def _setup_auto_fields(self):
+        '''
+        If no fluid fields are set, try to build the fluid field list by hand from the hydro file header
+        '''
+        # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
+        # Find nvar
+        
+
+        # TODO: copy/pasted from DomainFile; needs refactoring!
+        num = os.path.basename(self._ds.parameter_filename).split("."
+                )[0].split("_")[1]
+        testdomain = 1 # Just pick the first domain file to read
+        basename = "%s/%%s_%s.out%05i" % (
+            os.path.abspath(
+              os.path.dirname(self._ds.parameter_filename)),
+            num, testdomain)
+        hydro_fn = basename % "hydro"
+        # Do we have a hydro file?
+        if not os.path.exists(hydro_fn):
+            self.fluid_field_list = []
+            return
+        # Read the number of hydro variables
+        f = open(hydro_fn, "rb")
+        hydro_header = ( ('ncpu', 1, 'i'),
+                         ('nvar', 1, 'i'),
+                         ('ndim', 1, 'i'),
+                         ('nlevelmax', 1, 'i'),
+                         ('nboundary', 1, 'i'),
+                         ('gamma', 1, 'd')
+                         )
+        hvals = fpu.read_attrs(f, hydro_header)
+        self.ds.gamma = hvals['gamma']
+        nvar = hvals['nvar']
+        # OK, we got NVAR, now set up the arrays depending on what NVAR is
+        # Allow some wiggle room for users to add too many variables
+        if nvar < 5:
+            mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
+            raise ValueError
+        # Basic hydro runs
+        if nvar == 5:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "Pressure"]
+        if nvar > 5 and nvar < 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "Pressure", "Metallicity"]
+        # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+        if nvar == 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+                      "Pressure"]
+        if nvar > 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+                      "Pressure","Metallicity"]
+        while len(fields) < nvar:
+            fields.append("var"+str(len(fields)))
+        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
+        self.fluid_field_list = fields
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            domains = [dom for dom in self.domains if
+                       dom.included(dobj.selector)]
+            base_region = getattr(dobj, "base_region", dobj)
+            if len(domains) > 1:
+                mylog.debug("Identified %s intersecting domains", len(domains))
+            subsets = [RAMSESDomainSubset(base_region, domain, self.dataset)
+                       for domain in domains]
+            dobj._chunk_info = subsets
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, None)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield YTDataChunk(dobj, "spatial", [g], None)
+
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+class RAMSESDataset(Dataset):
+    _index_class = RAMSESIndex
+    _field_info_class = RAMSESFieldInfo
+    gamma = 1.4 # This will get replaced on hydro_fn open
+    
+    def __init__(self, filename, dataset_type='ramses',
+                 fields = None, storage_filename = None):
+        '''
+        fields: A list of the hydro variables, in the order they appear in
+                the hydro_XXXXX.outYYYYY file.  If set to None, a default
+                set of fields is inferred from the hydro header.
+        '''
+        # Here we want to initiate a traceback, if the reader is not built.
+        if isinstance(fields, types.StringTypes):
+            fields = field_aliases[fields]
+        self.fluid_types += ("ramses",)
+        self._fields_in_file = fields
+        Dataset.__init__(self, filename, dataset_type)
+        self.storage_filename = storage_filename
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+
+    def _set_code_unit_attributes(self):
+        """
+        Generates the conversions to various physical units based on the parameter file
+        """
+        # Note that unit_l *already* converts to proper!
+        # Also note that unit_l must be multiplied by the boxlen parameter to
+        # ensure we are correctly set up for the current domain.
+        length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
+        rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
+        # We're not multiplying by the boxlength here.
+        mass_unit = rho_u * self.parameters['unit_l']**3
+        # Cosmological runs are done in lookback conformal time.
+        # To convert to proper time, the time unit is calculated from
+        # the expansion factor.
+        time_unit = self.parameters['unit_t']
+        magnetic_unit = np.sqrt(4*np.pi * mass_unit /
+                                (time_unit**2 * length_unit))
+        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        self.length_unit = self.quan(length_unit, "cm")
+        self.mass_unit = self.quan(mass_unit, "g")
+        self.time_unit = self.quan(time_unit, "s")
+        self.velocity_unit = self.length_unit / self.time_unit
+
+    def _parse_parameter_file(self):
+        # hardcoded for now
+        # These should be explicitly obtained from the file, but for now that
+        # will wait until a reorganization of the source tree and better
+        # generalization.
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = 'ramses'
+        self.parameters["Time"] = 1. # default unit is 1...
+
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        # We now execute the same logic Oliver's code does
+        rheader = {}
+        f = open(self.parameter_filename)
+        def read_rhs(cast):
+            line = f.readline()
+            p, v = line.split("=")
+            rheader[p.strip()] = cast(v.strip())
+        for i in range(6): read_rhs(int)
+        f.readline()
+        for i in range(11): read_rhs(float)
+        f.readline()
+        read_rhs(str)
+        # This next line deserves some comment.  We specify a min_level that
+        # corresponds to the minimum level in the RAMSES simulation.  RAMSES is
+        # one-indexed, but its levels also refer to the *oct* dimensions -- so
+        # a levelmin of 1 would have *1* oct in it, and a levelmin of 2 would
+        # have 8 octs at the root mesh level.
+        self.min_level = rheader['levelmin'] - 1
+        # Now we read the hilbert indices
+        self.hilbert_indices = {}
+        if rheader['ordering type'] == "hilbert":
+            f.readline() # header
+            for n in range(rheader['ncpu']):
+                dom, mi, ma = f.readline().split()
+                self.hilbert_indices[int(dom)] = (float(mi), float(ma))
+        self.parameters.update(rheader)
+        self.current_time = self.parameters['time'] 
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_dimensions = np.ones(3, dtype='int32') * \
+                        2**(self.min_level+1)
+        self.domain_right_edge = np.ones(3, dtype='float64')
+        # This is likely not true, but I am not sure how to otherwise
+        # distinguish them.
+        mylog.warning("RAMSES frontend assumes all simulations are cosmological!")
+        self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
+        self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
+        self.omega_lambda = rheader["omega_l"]
+        self.omega_matter = rheader["omega_m"]
+        self.hubble_constant = rheader["H0"] / 100.0 # This is H100
+        self.max_level = rheader['levelmax'] - self.min_level
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not os.path.basename(args[0]).startswith("info_"): return False
+        fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
+        return os.path.exists(fn)

This diff is so big that we needed to truncate the remainder.
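
For context, the frontend added above plugs into yt's standard load
machinery: _is_valid() accepts an info_XXXXX.txt file that has a matching
amr_XXXXX.out00001 beside it, and the optional fields argument to
RAMSESDataset bypasses the automatic detection in _setup_auto_fields().  A
minimal usage sketch follows; the output path and the field ordering are
illustrative placeholders, not part of this changeset:

    import yt

    # Point yt.load at the info file; _is_valid checks for the companion
    # amr_*.out00001 file in the same directory.
    ds = yt.load("output_00080/info_00080.txt")

    # Optionally override the automatic hydro field detection by listing the
    # variables in the order they appear in the hydro_*.out* files.
    ds = yt.load("output_00080/info_00080.txt",
                 fields=["Density",
                         "x-velocity", "y-velocity", "z-velocity",
                         "Pressure", "Metallicity"])

    # levelmin in the info file is one-indexed and counts octs, so
    # ds.min_level == levelmin - 1 and the root grid has 2**(min_level + 1)
    # cells per side (this is what ds.domain_dimensions reports).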

https://bitbucket.org/yt_analysis/yt/commits/2f08bb95b7f8/
Changeset:   2f08bb95b7f8
Branch:      yt
User:        RicardaBeckmann
Date:        2015-06-05 23:27:58+00:00
Summary:     fix bugs in unit handling for ramses
Affected #:  1 file

diff -r b416c9046eb4860f7552a7dd76ce437c8138e034 -r 2f08bb95b7f85079b204d2789d85e7a744bf49e5 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -494,13 +494,14 @@
         #Please note that for all units given in the info file, the boxlen
         #still needs to be folded in, as shown below!
 
-        length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
-        rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
+        boxlen=self.parameters['boxlen']
+        length_unit = self.parameters['unit_l'] * boxlen
+        density_unit = self.parameters['unit_d']/ boxlen**3
 
         # In the mass unit, the factors of boxlen cancel back out, so this 
         #is equivalent to unit_d*unit_l**3
 
-        mass_unit = rho_u * length_unit**3
+        mass_unit = density_unit * length_unit**3
 
         # Cosmological runs are done in lookback conformal time. 
         # To convert to proper time, the time unit is calculated from 
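
The boxlen cancellation described in the comments above can be checked
directly.  A small standalone sketch (the parameter values are made up for
illustration and do not come from any particular RAMSES output):

    # Stand-ins for the unit_l, unit_d and boxlen entries of an info file.
    unit_l = 3.0857e24   # cm
    unit_d = 2.0e-30     # g/cm**3
    boxlen = 4.0

    length_unit = unit_l * boxlen     # as in _set_code_unit_attributes
    density_unit = unit_d / boxlen**3

    # (unit_d / boxlen**3) * (unit_l * boxlen)**3 == unit_d * unit_l**3,
    # so the factors of boxlen drop back out of the mass unit.
    mass_unit = density_unit * length_unit**3
    assert abs(mass_unit - unit_d * unit_l**3) <= 1e-10 * mass_unit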


https://bitbucket.org/yt_analysis/yt/commits/3e207233e7c0/
Changeset:   3e207233e7c0
Branch:      yt
User:        RicardaBeckmann
Date:        2015-06-05 23:35:17+00:00
Summary:     deleted accidentally tracked files
Affected #:  10 files

diff -r 2f08bb95b7f85079b204d2789d85e7a744bf49e5 -r 3e207233e7c01ed0c3a540831f302674a6c0ed47 yt/fields/particle_fields.py~
--- a/yt/fields/particle_fields.py~
+++ /dev/null
@@ -1,576 +0,0 @@
-"""
-These are common particle fields.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-from yt.units.yt_array import YTArray
-from yt.fields.derived_field import \
-    ValidateParameter, \
-    ValidateSpatial
-from yt.utilities.physical_constants import \
-    mass_hydrogen_cgs, \
-    mass_sun_cgs, \
-    mh
-
-from yt.units.yt_array import \
-    uconcatenate
-
-from yt.utilities.math_utils import \
-    get_sph_r_component, \
-    get_sph_theta_component, \
-    get_sph_phi_component, \
-    get_cyl_r_component, \
-    get_cyl_z_component, \
-    get_cyl_theta_component, \
-    get_cyl_r, get_cyl_theta, \
-    get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi, \
-    periodic_dist, euclidean_dist
-
-from .vector_operations import \
-     create_magnitude_field
-    
-def _field_concat(fname):
-    def _AllFields(field, data):
-        v = []
-        for ptype in data.ds.particle_types:
-            data.ds._last_freq = (ptype, None)
-            if ptype == "all" or \
-                ptype in data.ds.known_filters:
-                  continue
-            v.append(data[ptype, fname].copy())
-        rv = uconcatenate(v, axis=0)
-        return rv
-    return _AllFields
-
-def _field_concat_slice(fname, axi):
-    def _AllFields(field, data):
-        v = []
-        for ptype in data.ds.particle_types:
-            data.ds._last_freq = (ptype, None)
-            if ptype == "all" or \
-                ptype in data.ds.known_filters:
-                  continue
-            v.append(data[ptype, fname][:,axi])
-        rv = uconcatenate(v, axis=0)
-        return rv
-    return _AllFields
-
-def particle_deposition_functions(ptype, coord_name, mass_name, registry):
-    orig = set(registry.keys())
-    ptype_dn = ptype.replace("_","\/").title()
-    def particle_count(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, method = "count")
-        d = data.ds.arr(d, input_units = "cm**-3")
-        return data.apply_units(d, field.units)
-
-    registry.add_field(("deposit", "%s_count" % ptype),
-             function = particle_count,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % ptype_dn)
-
-    def particle_mass(field, data):
-        pos = data[ptype, coord_name]
-        pmass = data[ptype, mass_name].in_units(field.units)
-        d = data.deposit(pos, [pmass], method = "sum")
-        return data.apply_units(d, field.units)
-
-    registry.add_field(("deposit", "%s_mass" % ptype),
-             function = particle_mass,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % ptype_dn,
-             units = "g")
-             
-    def particle_density(field, data):
-        pos = data[ptype, coord_name]
-        mass = data[ptype, mass_name]
-        pos.convert_to_units("code_length")
-        mass.convert_to_units("code_mass")
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        d = data.ds.arr(d, "code_mass")
-        d /= data["index", "cell_volume"]
-        return d
-
-    registry.add_field(("deposit", "%s_density" % ptype),
-             function = particle_density,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Density}" % ptype_dn,
-             units = "g/cm**3")
-
-    def particle_cic(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
-        d = data.apply_units(d, data[ptype, mass_name].units)
-        d /= data["index", "cell_volume"]
-        return d
-
-    registry.add_field(("deposit", "%s_cic" % ptype),
-             function = particle_cic,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s CIC Density}" % ptype_dn,
-             units = "g/cm**3")
-
-    def _get_density_weighted_deposit_field(fname, units, method):
-        def _deposit_field(field, data):
-            """
-            Create a grid field for particle quantities weighted by particle
-            mass, using cloud-in-cell deposit.
-            """
-            pos = data[ptype, "particle_position"]
-            # Get back into density
-            pden = data[ptype, 'particle_mass']
-            top = data.deposit(pos, [data[(ptype, fname)]*pden], method=method)
-            bottom = data.deposit(pos, [pden], method=method)
-            top[bottom == 0] = 0.0
-            bnz = bottom.nonzero()
-            top[bnz] /= bottom[bnz]
-            d = data.ds.arr(top, input_units=units)
-            return d
-        return _deposit_field
-
-    for ax in 'xyz':
-        for method, name in zip(("cic", "sum"), ("cic", "nn")):
-            function = _get_density_weighted_deposit_field(
-                "particle_velocity_%s" % ax, "cm/s", method)
-            registry.add_field(
-                ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)),
-                function=function, units="cm/s", take_log=False,
-                validators=[ValidateSpatial(0)])
-
-    # Now some translation functions.
-
-    def particle_ones(field, data):
-        v = np.ones(data[ptype, mass_name].shape, dtype="float64")
-        return data.apply_units(v, field.units)
-
-    registry.add_field((ptype, "particle_ones"),
-                       function = particle_ones,
-                       particle_type = True,
-                       units = "")
-
-    def particle_mesh_ids(field, data):
-        pos = data[ptype, coord_name]
-        ids = np.zeros(pos.shape[0], dtype="float64") - 1
-        # This is float64 in name only.  It will be properly cast inside the
-        # deposit operation.
-        #_ids = ids.view("float64")
-        data.deposit(pos, [ids], method = "mesh_id")
-        return data.apply_units(ids, "")
-    registry.add_field((ptype, "mesh_id"),
-            function = particle_mesh_ids,
-            validators = [ValidateSpatial()],
-            particle_type = True)
-
-    return list(set(registry.keys()).difference(orig))
-
-def particle_scalar_functions(ptype, coord_name, vel_name, registry):
-
-    # Now we have to set up the various velocity and coordinate things.  In the
-    # future, we'll actually invert this and use the 3-component items
-    # elsewhere, and stop using these.
-    
-    # Note that we pass in _ptype here so that it's defined inside the closure.
-
-    def _get_coord_funcs(axi, _ptype):
-        def _particle_velocity(field, data):
-            return data[_ptype, vel_name][:,axi]
-        def _particle_position(field, data):
-            return data[_ptype, coord_name][:,axi]
-        return _particle_velocity, _particle_position
-    for axi, ax in enumerate("xyz"):
-        v, p = _get_coord_funcs(axi, ptype)
-        registry.add_field((ptype, "particle_velocity_%s" % ax),
-            particle_type = True, function = v,
-            units = "code_velocity")
-        registry.add_field((ptype, "particle_position_%s" % ax),
-            particle_type = True, function = p,
-            units = "code_length")
-
-def particle_vector_functions(ptype, coord_names, vel_names, registry):
-
-    # This will column_stack a set of scalars to create vector fields.
-
-    def _get_vec_func(_ptype, names):
-        def particle_vectors(field, data):
-            v = [data[_ptype, name].in_units(field.units)
-                  for name in names]
-            c = np.column_stack(v)
-            return data.apply_units(c, field.units)
-        return particle_vectors
-    registry.add_field((ptype, "particle_position"),
-                       function=_get_vec_func(ptype, coord_names),
-                       units = "code_length",
-                       particle_type=True)
-    registry.add_field((ptype, "particle_velocity"),
-                       function=_get_vec_func(ptype, vel_names),
-                       units = "cm / s",
-                       particle_type=True)
-
-def standard_particle_fields(registry, ptype,
-                             spos = "particle_position_%s",
-                             svel = "particle_velocity_%s"):
-    # This function will set things up based on the scalar fields and standard
-    # yt field names.
-
-    def _particle_velocity_magnitude(field, data):
-        """ M{|v|} """
-        bulk_velocity = data.get_field_parameter("bulk_velocity")
-        if bulk_velocity is None:
-            bulk_velocity = np.zeros(3)
-        return np.sqrt((data[ptype, svel % 'x'] - bulk_velocity[0])**2
-                     + (data[ptype, svel % 'y'] - bulk_velocity[1])**2
-                     + (data[ptype, svel % 'z'] - bulk_velocity[2])**2 )
-    
-    registry.add_field((ptype, "particle_velocity_magnitude"),
-                  function=_particle_velocity_magnitude,
-                  particle_type=True,
-                  take_log=False,
-                  units="cm/s")
-
-    def _particle_specific_angular_momentum(field, data):
-        """
-        Calculate the angular of a particle velocity.  Returns a vector for each
-        particle.
-        """
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        xv = data[ptype, svel % 'x'] - bv[0]
-        yv = data[ptype, svel % 'y'] - bv[1]
-        zv = data[ptype, svel % 'z'] - bv[2]
-        center = data.get_field_parameter('center')
-        coords = YTArray([data[ptype, spos % 'x'],
-                           data[ptype, spos % 'y'],
-                           data[ptype, spos % 'z']], dtype=np.float64)
-        new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-        r_vec = coords - np.reshape(center,new_shape)
-        v_vec = YTArray([xv,yv,zv], dtype=np.float64)
-        return np.cross(r_vec, v_vec, axis=0)
-
-    registry.add_field((ptype, "particle_specific_angular_momentum"),
-              function=_particle_specific_angular_momentum,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    def _particle_specific_angular_momentum_x(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        center = data.get_field_parameter('center')
-        y = data[ptype, spos % "y"] - center[1]
-        z = data[ptype, spos % "z"] - center[2]
-        yv = data[ptype, svel % "y"] - bv[1]
-        zv = data[ptype, svel % "z"] - bv[2]
-        return yv*z - zv*y
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_x"),
-              function=_particle_specific_angular_momentum_x,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    def _particle_specific_angular_momentum_y(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        center = data.get_field_parameter('center')
-        x = data[ptype, spos % "x"] - center[0]
-        z = data[ptype, spos % "z"] - center[2]
-        xv = data[ptype, svel % "x"] - bv[0]
-        zv = data[ptype, svel % "z"] - bv[2]
-        return -(xv*z - zv*x)
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_y"),
-              function=_particle_specific_angular_momentum_y,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    def _particle_specific_angular_momentum_z(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        center = data.get_field_parameter('center')
-        x = data[ptype, spos % "x"] - center[0]
-        y = data[ptype, spos % "y"] - center[1]
-        xv = data[ptype, svel % "x"] - bv[0]
-        yv = data[ptype, svel % "y"] - bv[1]
-        return xv*y - yv*x
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_z"),
-              function=_particle_specific_angular_momentum_z,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    create_magnitude_field(registry, "particle_specific_angular_momentum",
-                           "cm**2/s", ftype=ptype, particle_type=True)
-    
-    def _particle_angular_momentum_x(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_x"]
-    registry.add_field((ptype, "particle_angular_momentum_x"),
-             function=_particle_angular_momentum_x,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
-
-    def _particle_angular_momentum_y(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_y"]
-    registry.add_field((ptype, "particle_angular_momentum_y"),
-             function=_particle_angular_momentum_y,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
-
-    def _particle_angular_momentum_z(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_z"]
-    registry.add_field((ptype, "particle_angular_momentum_z"),
-             function=_particle_angular_momentum_z,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
-
-    def _particle_angular_momentum(field, data):
-        return data[ptype, "particle_mass"] \
-            * data[ptype, "particle_specific_angular_momentum"]
-    registry.add_field((ptype, "particle_angular_momentum"),
-              function=_particle_angular_momentum,
-              particle_type=True,
-              units="g*cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    create_magnitude_field(registry, "particle_angular_momentum",
-                           "g*cm**2/s", ftype=ptype, particle_type=True)
-    
-    from .field_functions import \
-        get_radius
-
-    def _particle_radius(field, data):
-        return get_radius(data, "particle_position_")
-    registry.add_field((ptype, "particle_radius"),
-              function=_particle_radius,
-              validators=[ValidateParameter("center")],
-              units="cm", particle_type = True,
-              display_name = "Particle Radius")
-
-    def _particle_spherical_position_radius(field, data):
-        """
-        Radial component of the particles' position vectors in spherical coords
-        on the provided field parameters for 'normal', 'center', and 
-        'bulk_velocity', 
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
-        pos = pos - np.reshape(center, (3, 1))
-        sphr = get_sph_r_component(pos, theta, phi, normal)
-        return sphr
-
-    registry.add_field((ptype, "particle_spherical_position_radius"),
-              function=_particle_spherical_position_radius,
-              particle_type=True, units="cm",
-              validators=[ValidateParameter("normal"), 
-                          ValidateParameter("center")])
-
-    def _particle_spherical_position_theta(field, data):
-        """
-        Theta component of the particles' position vectors in spherical coords
-        on the provided field parameters for 'normal', 'center', and 
-        'bulk_velocity', 
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
-        pos = pos - np.reshape(center, (3, 1))
-        spht = get_sph_theta_component(pos, theta, phi, normal)
-        return spht
-
-    registry.add_field((ptype, "particle_spherical_position_theta"),
-              function=_particle_spherical_position_theta,
-              particle_type=True, units="cm",
-              validators=[ValidateParameter("normal"), 
-                          ValidateParameter("center")])
-
-    def _particle_spherical_position_phi(field, data):
-        """
-        Phi component of the particles' position vectors in spherical coords
-        on the provided field parameters for 'normal', 'center', and 
-        'bulk_velocity', 
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
-        pos = pos - np.reshape(center, (3, 1))
-        sphp = get_sph_phi_component(pos, phi, normal)
-        return sphp
-
-    registry.add_field((ptype, "particle_spherical_position_phi"),
-              function=_particle_spherical_position_phi,
-              particle_type=True, units="cm",
-              validators=[ValidateParameter("normal"), 
-                          ValidateParameter("center")])
-
-    def _particle_spherical_velocity_radius(field, data):
-        """
-        Radial component of the particles' velocity vectors in spherical coords
-        based on the provided field parameters for 'normal', 'center', and 
-        'bulk_velocity', 
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
-        pos = pos - np.reshape(center, (3, 1))
-        vel = vel - np.reshape(bv, (3, 1))
-        sphr = get_sph_r_component(vel, theta, phi, normal)
-        return sphr
-
-    registry.add_field((ptype, "particle_spherical_velocity_radius"),
-              function=_particle_spherical_velocity_radius,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"), 
-                          ValidateParameter("center")])
-
-    # This is simply aliased to "particle_spherical_velocity_radius"
-    # for ease of use.
-    registry.add_field((ptype, "particle_radial_velocity"),
-              function=_particle_spherical_velocity_radius,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"), 
-                          ValidateParameter("center")])
-
-    def _particle_spherical_velocity_theta(field, data):
-        """
-        Theta component of the particles' velocity vectors in spherical coords
-        based on the provided field parameters for 'normal', 'center', and 
-        'bulk_velocity', 
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
-        pos = pos - np.reshape(center, (3, 1))
-        vel = vel - np.reshape(bv, (3, 1))
-        spht = get_sph_theta_component(vel, theta, phi, normal)
-        return spht
-
-    registry.add_field((ptype, "particle_spherical_velocity_theta"),
-              function=_particle_spherical_velocity_theta,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"), 
-                          ValidateParameter("center")])
-
-    def _particle_spherical_velocity_phi(field, data):
-        """
-        Phi component of the particles' velocity vectors in spherical coords
-        based on the provided field parameters for 'normal', 'center', and 
-        'bulk_velocity', 
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
-        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
-        pos = pos - np.reshape(center, (3, 1))
-        vel = vel - np.reshape(bv, (3, 1))
-        sphp = get_sph_phi_component(vel, phi, normal)
-        return sphp
-
-    registry.add_field((ptype, "particle_spherical_velocity_phi"),
-              function=_particle_spherical_velocity_phi,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"), 
-                          ValidateParameter("center")])
-
-def add_particle_average(registry, ptype, field_name, 
-                         weight = "particle_mass",
-                         density = True):
-    field_units = registry[ptype, field_name].units
-    def _pfunc_avg(field, data):
-        pos = data[ptype, "particle_position"]
-        f = data[ptype, field_name]
-        wf = data[ptype, weight]
-        f *= wf
-        v = data.deposit(pos, [f], method = "sum")
-        w = data.deposit(pos, [wf], method = "sum")
-        v /= w
-        if density: v /= data["index", "cell_volume"]
-        v[np.isnan(v)] = 0.0
-        return v
-    fn = ("deposit", "%s_avg_%s" % (ptype, field_name))
-    registry.add_field(fn, function=_pfunc_avg,
-                       validators = [ValidateSpatial(0)],
-                       particle_type = False,
-                       units = field_units)
-    return fn
-
-def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
-        smoothing_length_name, density_name, smoothed_field, registry,
-        nneighbors = None):
-    field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
-    field_units = registry[ptype, smoothed_field].units
-    def _vol_weight(field, data):
-        pos = data[ptype, coord_name].in_units("code_length")
-        mass = data[ptype, mass_name].in_cgs()
-        dens = data[ptype, density_name].in_cgs()
-        quan = data[ptype, smoothed_field].in_units(field_units)
-        if smoothing_length_name is None:
-            hsml = np.zeros(quan.shape, dtype='float64') - 1
-            hsml = data.apply_units(hsml, "code_length")
-        else:
-            hsml = data[ptype, smoothing_length_name].in_units("code_length")
-        kwargs = {}
-        if nneighbors:
-            kwargs['nneighbors'] = nneighbors
-        rv = data.smooth(pos, [mass, hsml, dens, quan],
-                         method="volume_weighted",
-                         create_octree = True)[0]
-        rv[np.isnan(rv)] = 0.0
-        # Now some quick unit conversions.
-        rv = data.apply_units(rv, field_units)
-        return rv
-    registry.add_field(field_name, function = _vol_weight,
-                       validators = [ValidateSpatial(0)],
-                       units = field_units)
-    return [field_name]
-

diff -r 2f08bb95b7f85079b204d2789d85e7a744bf49e5 -r 3e207233e7c01ed0c3a540831f302674a6c0ed47 yt/frontends/ramses/data_structures.py.orig
--- a/yt/frontends/ramses/data_structures.py.orig
+++ /dev/null
@@ -1,596 +0,0 @@
-"""
-RAMSES-specific data structures
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import os
-import numpy as np
-import stat
-import weakref
-import cStringIO
-<<<<<<< local
-import scipy.integrate
-
-=======
->>>>>>> other
-from yt.funcs import *
-from yt.geometry.oct_geometry_handler import \
-    OctreeIndex
-from yt.geometry.geometry_handler import \
-    Index, YTDataChunk
-from yt.data_objects.static_output import \
-    Dataset
-from yt.data_objects.octree_subset import \
-    OctreeSubset
-
-from .definitions import ramses_header, field_aliases
-from yt.utilities.lib.misc_utilities import \
-    get_box_grids_level
-from yt.utilities.io_handler import \
-    io_registry
-from yt.utilities.physical_constants import mp, kb
-from .fields import \
-    RAMSESFieldInfo, _X
-import yt.utilities.fortran_utils as fpu
-from yt.geometry.oct_container import \
-    RAMSESOctreeContainer
-from yt.fields.particle_fields import \
-    standard_particle_fields
-
-class RAMSESDomainFile(object):
-    _last_mask = None
-    _last_selector_id = None
-
-    def __init__(self, ds, domain_id):
-        self.ds = ds
-        self.domain_id = domain_id
-        self.nvar = 0 # Set this later!
-        num = os.path.basename(ds.parameter_filename).split("."
-                )[0].split("_")[1]
-        basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(ds.parameter_filename)),
-            num, domain_id)
-        for t in ['grav', 'hydro', 'part', 'amr']:
-            setattr(self, "%s_fn" % t, basename % t)
-        self._read_amr_header()
-        self._read_hydro_header()
-        self._read_particle_header()
-        self._read_amr()
-
-    _hydro_offset = None
-    _level_count = None
-
-    def __repr__(self):
-        return "RAMSESDomainFile: %i" % self.domain_id
-
-    def _is_hydro(self):
-        '''
-        Does the output include hydro?
-        '''
-        return os.path.exists(self.hydro_fn)
-
-    @property
-    def level_count(self):
-        if self._level_count is not None: return self._level_count
-        self.hydro_offset
-        return self._level_count
-
-    @property
-    def hydro_offset(self):
-        if self._hydro_offset is not None: return self._hydro_offset
-        # We now have to open the file and calculate it
-        f = open(self.hydro_fn, "rb")
-        fpu.skip(f, 6)
-        # It goes: level, CPU, 8-variable
-        min_level = self.ds.min_level
-        n_levels = self.amr_header['nlevelmax'] - min_level
-        hydro_offset = np.zeros(n_levels, dtype='int64')
-        hydro_offset -= 1
-        level_count = np.zeros(n_levels, dtype='int64')
-        skipped = []
-        for level in range(self.amr_header['nlevelmax']):
-            for cpu in range(self.amr_header['nboundary'] +
-                             self.amr_header['ncpu']):
-                header = ( ('file_ilevel', 1, 'I'),
-                           ('file_ncache', 1, 'I') )
-                try:
-                    hvals = fpu.read_attrs(f, header, "=")
-                except AssertionError:
-                    print "You are running with the wrong number of fields."
-                    print "If you specified these in the load command, check the array length."
-                    print "In this file there are %s hydro fields." % skipped
-                    #print "The last set of field sizes was: %s" % skipped
-                    raise
-                if hvals['file_ncache'] == 0: continue
-                assert(hvals['file_ilevel'] == level+1)
-                if cpu + 1 == self.domain_id and level >= min_level:
-                    hydro_offset[level - min_level] = f.tell()
-                    level_count[level - min_level] = hvals['file_ncache']
-                skipped = fpu.skip(f, 8 * self.nvar)
-        self._hydro_offset = hydro_offset
-        self._level_count = level_count
-        return self._hydro_offset
-
-    def _read_hydro_header(self):
-        # If no hydro file is found, return
-        if not self._is_hydro():
-            return
-        if self.nvar > 0: return self.nvar
-        # Read the number of hydro  variables
-        f = open(self.hydro_fn, "rb")
-        fpu.skip(f, 1)
-        self.nvar = fpu.read_vector(f, "i")[0]
-
-    def _read_particle_header(self):
-        if not os.path.exists(self.part_fn):
-            self.local_particle_count = 0
-            self.particle_field_offsets = {}
-            return
-        f = open(self.part_fn, "rb")
-        f.seek(0, os.SEEK_END)
-        flen = f.tell()
-        f.seek(0)
-        hvals = {}
-        attrs = ( ('ncpu', 1, 'I'),
-                  ('ndim', 1, 'I'),
-                  ('npart', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        fpu.read_vector(f, 'I')
-
-        attrs = ( ('nstar_tot', 1, 'I'),
-                  ('mstar_tot', 1, 'd'),
-                  ('mstar_lost', 1, 'd'),
-                  ('nsink', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        self.particle_header = hvals
-        self.local_particle_count = hvals['npart']
-        particle_fields = [
-                ("particle_position_x", "d"),
-                ("particle_position_y", "d"),
-                ("particle_position_z", "d"),
-                ("particle_velocity_x", "d"),
-                ("particle_velocity_y", "d"),
-                ("particle_velocity_z", "d"),
-                ("particle_mass", "d"),
-                ("particle_identifier", "I"),
-                ("particle_refinement_level", "I")]
-        if hvals["nstar_tot"] > 0:
-            particle_fields += [("particle_age", "d"),
-                                ("particle_metallicity", "d")]
-
-        field_offsets = {}
-        _pfields = {}
-        for field, vtype in particle_fields:
-            if f.tell() >= flen: break
-            field_offsets["io", field] = f.tell()
-            _pfields["io", field] = vtype
-            fpu.skip(f, 1)
-        self.particle_field_offsets = field_offsets
-        self.particle_field_types = _pfields
-        self.particle_types = self.particle_types_raw = ("io",)
-
-    def _read_amr_header(self):
-        hvals = {}
-        f = open(self.amr_fn, "rb")
-        for header in ramses_header(hvals):
-            hvals.update(fpu.read_attrs(f, header))
-        # That's the header, now we skip a few.
-        hvals['numbl'] = np.array(hvals['numbl']).reshape(
-            (hvals['nlevelmax'], hvals['ncpu']))
-        fpu.skip(f)
-        if hvals['nboundary'] > 0:
-            fpu.skip(f, 2)
-            self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
-        else:
-            self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
-        free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
-        ordering = fpu.read_vector(f, 'c')
-        fpu.skip(f, 4)
-        # Now we're at the tree itself
-        # Now we iterate over each level and each CPU.
-        self.amr_header = hvals
-        self.amr_offset = f.tell()
-        self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
-        self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
-
-    def _read_amr(self):
-        """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain 
-           are needed - its position in the octree is found automatically.
-           The most important is finding all the information to feed
-           oct_handler.add
-        """
-        self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
-                self.ds.domain_left_edge, self.ds.domain_right_edge)
-        root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
-        self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
-        fb = open(self.amr_fn, "rb")
-        fb.seek(self.amr_offset)
-        f = cStringIO.StringIO()
-        f.write(fb.read())
-        f.seek(0)
-        mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
-            self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
-        def _ng(c, l):
-            if c < self.amr_header['ncpu']:
-                ng = self.amr_header['numbl'][l, c]
-            else:
-                ng = self.ngridbound[c - self.amr_header['ncpu'] +
-                                self.amr_header['nboundary']*l]
-            return ng
-        min_level = self.ds.min_level
-        # yt max level is not the same as the RAMSES one.
-        # yt max level is the maximum number of additional refinement levels
-        # so for a uni grid run with no refinement, it would be 0. 
-        # So we initially assume that.
-        max_level = 0
-        nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
-        for level in range(self.amr_header['nlevelmax']):
-            # Easier if do this 1-indexed
-            for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
-                #ng is the number of octs on this level on this domain
-                ng = _ng(cpu, level)
-                if ng == 0: continue
-                ind = fpu.read_vector(f, "I").astype("int64")
-                fpu.skip(f, 2)
-                pos = np.empty((ng, 3), dtype='float64')
-                pos[:,0] = fpu.read_vector(f, "d") - nx
-                pos[:,1] = fpu.read_vector(f, "d") - ny
-                pos[:,2] = fpu.read_vector(f, "d") - nz
-                #pos *= self.ds.domain_width
-                #pos += self.dataset.domain_left_edge
-                fpu.skip(f, 31)
-                #parents = fpu.read_vector(f, "I")
-                #fpu.skip(f, 6)
-                #children = np.empty((ng, 8), dtype='int64')
-                #for i in range(8):
-                #    children[:,i] = fpu.read_vector(f, "I")
-                #cpu_map = np.empty((ng, 8), dtype="int64")
-                #for i in range(8):
-                #    cpu_map[:,i] = fpu.read_vector(f, "I")
-                #rmap = np.empty((ng, 8), dtype="int64")
-                #for i in range(8):
-                #    rmap[:,i] = fpu.read_vector(f, "I")
-                # We don't want duplicate grids.
-                # Note that we're adding *grids*, not individual cells.
-                if level >= min_level:
-                    assert(pos.shape[0] == ng)
-                    n = self.oct_handler.add(cpu + 1, level - min_level, pos,
-                                count_boundary = 1)
-                    self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
-                    if n > 0: max_level = max(level - min_level, max_level)
-        self.max_level = max_level
-        self.oct_handler.finalize()
-
-    def _error_check(self, cpu, level, pos, n, ng, nn):
-        # NOTE: We have the second conditional here because internally, it will
-        # not add any octs in that case.
-        if n == ng or cpu + 1 > self.oct_handler.num_domains:
-            return
-        # This is where we now check for issues with creating the new octs, and
-        # we attempt to determine what precisely is going wrong.
-        # These are all print statements.
-        print "We have detected an error with the construction of the Octree."
-        print "  The number of Octs to be added :  %s" % ng
-        print "  The number of Octs added       :  %s" % n
-        print "  Level                          :  %s" % level
-        print "  CPU Number (0-indexed)         :  %s" % cpu
-        for i, ax in enumerate('xyz'):
-            print "  extent [%s]                     :  %s %s" % \
-            (ax, pos[:,i].min(), pos[:,i].max())
-        print "  domain left                    :  %s" % \
-            (self.ds.domain_left_edge,)
-        print "  domain right                   :  %s" % \
-            (self.ds.domain_right_edge,)
-        print "  offset applied                 :  %s %s %s" % \
-            (nn[0], nn[1], nn[2])
-        print "AMR Header:"
-        for key in sorted(self.amr_header):
-            print "   %-30s: %s" % (key, self.amr_header[key])
-        raise RuntimeError
-
-    def included(self, selector):
-        if getattr(selector, "domain_id", None) is not None:
-            return selector.domain_id == self.domain_id
-        domain_ids = self.oct_handler.domain_identify(selector)
-        return self.domain_id in domain_ids
-
-class RAMSESDomainSubset(OctreeSubset):
-
-    _domain_offset = 1
-
-    def fill(self, content, fields, selector):
-        # Here we get a copy of the file, which we skip through and read the
-        # bits we want.
-        oct_handler = self.oct_handler
-        all_fields = self.domain.ds.index.fluid_field_list
-        fields = [f for ft, f in fields]
-        tr = {}
-        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
-        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
-            selector, self.domain_id, cell_count)
-        for field in fields:
-            tr[field] = np.zeros(cell_count, 'float64')
-        for level, offset in enumerate(self.domain.hydro_offset):
-            if offset == -1: continue
-            content.seek(offset)
-            nc = self.domain.level_count[level]
-            temp = {}
-            for field in all_fields:
-                temp[field] = np.empty((nc, 8), dtype="float64")
-            for i in range(8):
-                for field in all_fields:
-                    if field not in fields:
-                        fpu.skip(content)
-                    else:
-                        temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
-            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
-        return tr
-
-class RAMSESIndex(OctreeIndex):
-
-    def __init__(self, ds, dataset_type='ramses'):
-        self.fluid_field_list = ds._fields_in_file
-        self.dataset_type = dataset_type
-        self.dataset = weakref.proxy(ds)
-        self.index_filename = self.dataset.parameter_filename
-        self.directory = os.path.dirname(self.index_filename)
-        self.max_level = None
-
-        self.float_type = np.float64
-        super(RAMSESIndex, self).__init__(ds, dataset_type)
-
-    def _initialize_oct_handler(self):
-        self.domains = [RAMSESDomainFile(self.dataset, i + 1)
-                        for i in range(self.dataset['ncpu'])]
-        total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
-                         for dom in self.domains)
-        self.max_level = max(dom.max_level for dom in self.domains)
-        self.num_grids = total_octs
-
-    def _detect_output_fields(self):
-        # Do we want to attempt to figure out what the fields are in the file?
-        dsl = set([])
-        if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
-            self._setup_auto_fields()
-        for domain in self.domains:
-            dsl.update(set(domain.particle_field_offsets.keys()))
-        self.particle_field_list = list(dsl)
-        self.field_list = [("ramses", f) for f in self.fluid_field_list] \
-                        + self.particle_field_list
-
-    def _setup_auto_fields(self):
-        '''
-        If no fluid fields are set, the code tries to set up a fluids array by hand
-        '''
-        # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
-        # Find nvar
-        
-
-        # TODO: copy/pasted from DomainFile; needs refactoring!
-        num = os.path.basename(self.dataset.parameter_filename).split("."
-                )[0].split("_")[1]
-        testdomain = 1 # Just pick the first domain file to read
-        basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(self.dataset.parameter_filename)),
-            num, testdomain)
-        hydro_fn = basename % "hydro"
-        # Do we have a hydro file?
-        if not os.path.exists(hydro_fn):
-            self.fluid_field_list = []
-            return
-        # Read the number of hydro  variables
-        f = open(hydro_fn, "rb")
-        hydro_header = ( ('ncpu', 1, 'i'),
-                         ('nvar', 1, 'i'),
-                         ('ndim', 1, 'i'),
-                         ('nlevelmax', 1, 'i'),
-                         ('nboundary', 1, 'i'),
-                         ('gamma', 1, 'd')
-                         )
-        hvals = fpu.read_attrs(f, hydro_header)
-        self.ds.gamma = hvals['gamma']
-        nvar = hvals['nvar']
-        # OK, we got NVAR, now set up the arrays depending on what NVAR is
-        # Allow some wiggle room for users to add too many variables
-        if nvar < 5:
-            mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
-            raise ValueError
-        # Basic hydro runs
-        if nvar == 5:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "Pressure"]
-        if nvar > 5 and nvar < 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "Pressure", "Metallicity"]
-        # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
-        if nvar == 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
-                      "Pressure"]
-        if nvar > 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
-                      "Pressure","Metallicity"]
-        while len(fields) < nvar:
-            fields.append("var"+str(len(fields)))
-        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
-        self.fluid_field_list = fields
-
-    def _identify_base_chunk(self, dobj):
-        if getattr(dobj, "_chunk_info", None) is None:
-            domains = [dom for dom in self.domains if
-                       dom.included(dobj.selector)]
-            base_region = getattr(dobj, "base_region", dobj)
-            if len(domains) > 1:
-                mylog.debug("Identified %s intersecting domains", len(domains))
-            subsets = [RAMSESDomainSubset(base_region, domain, self.dataset)
-                       for domain in domains]
-            dobj._chunk_info = subsets
-        dobj._current_chunk = list(self._chunk_all(dobj))[0]
-
-    def _chunk_all(self, dobj):
-        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, None)
-
-    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
-        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for i,og in enumerate(sobjs):
-            if ngz > 0:
-                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
-            else:
-                g = og
-            yield YTDataChunk(dobj, "spatial", [g], None)
-
-    def _chunk_io(self, dobj, cache = True, local_only = False):
-        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
-
-class RAMSESDataset(Dataset):
-    _index_class = RAMSESIndex
-    _field_info_class = RAMSESFieldInfo
-    gamma = 1.4 # This will get replaced on hydro_fn open
-    
-    def __init__(self, filename, dataset_type='ramses',
-                 fields = None, storage_filename = None,
-                 units_override=None):
-        # Here we want to initiate a traceback, if the reader is not built.
-        if isinstance(fields, types.StringTypes):
-            fields = field_aliases[fields]
-        '''
-        fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
-                If set to None, will try a default set of fields
-        '''
-        self.fluid_types += ("ramses",)
-        self._fields_in_file = fields
-        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
-        self.storage_filename = storage_filename
-
-    def __repr__(self):
-        return self.basename.rsplit(".", 1)[0]
-
-    def _set_code_unit_attributes(self):
-        """
-        Generates the conversion to various physical _units based on the parameter file
-        """
-        # Note that unit_l *already* converts to proper!
-        # Also note that unit_l must be multiplied by the boxlen parameter to
-        # ensure we are correctly set up for the current domain.
-<<<<<<< local
-        length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
-        rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
-        # We're not multiplying by the boxlength here.
-        mass_unit = rho_u * length_unit**3
-        # Cosmological runs are done in lookback conformal time. 
-        #To convert to proper time, the time unit is calculated from 
-        # the expansion factor.
-=======
-        length_unit = self.parameters['unit_l']
-        boxlen = self.parameters['boxlen']
-        density_unit = self.parameters['unit_d']
-        mass_unit = density_unit * (length_unit * boxlen)**3
->>>>>>> other
-        time_unit = self.parameters['unit_t']
-        magnetic_unit = np.sqrt(4*np.pi * mass_unit /
-                                (time_unit**2 * length_unit))
-        pressure_unit = density_unit * (length_unit / time_unit)**2
-        # TODO:
-        # Generalize the temperature field to account for ionization
-        # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
-        mean_molecular_weight_factor = _X**-1
-
-        self.density_unit = self.quan(density_unit, 'g/cm**3')
-        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
-        self.length_unit = self.quan(length_unit * boxlen, "cm")
-        self.mass_unit = self.quan(mass_unit, "g")
-        self.time_unit = self.quan(time_unit, "s")
-        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
-        self.temperature_unit = (self.velocity_unit**2 * mp *
-                                 mean_molecular_weight_factor / kb)
-        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
-
-    def _parse_parameter_file(self):
-        # hardcoded for now
-        # These should be explicitly obtained from the file, but for now that
-        # will wait until a reorganization of the source tree and better
-        # generalization.
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = 'ramses'
-        self.parameters["Time"] = 1. # default unit is 1...
-
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        # We now execute the same logic Oliver's code does
-        rheader = {}
-        f = open(self.parameter_filename)
-        def read_rhs(cast):
-            line = f.readline()
-            p, v = line.split("=")
-            rheader[p.strip()] = cast(v)
-        for i in range(6): read_rhs(int)
-        f.readline()
-        for i in range(11): read_rhs(float)
-        f.readline()
-        read_rhs(str)
-        # This next line deserves some comment.  We specify a min_level that
-        # corresponds to the minimum level in the RAMSES simulation.  RAMSES is
-        # one-indexed, but it also does refer to the *oct* dimensions -- so
-        # this means that a levelmin of 1 would have *1* oct in it.  So a
-        # levelmin of 2 would have 8 octs at the root mesh level.
-        self.min_level = rheader['levelmin'] - 1
-        # Now we read the hilbert indices
-        self.hilbert_indices = {}
-        if rheader['ordering type'] == "hilbert":
-            f.readline() # header
-            for n in range(rheader['ncpu']):
-                dom, mi, ma = f.readline().split()
-                self.hilbert_indices[int(dom)] = (float(mi), float(ma))
-        self.parameters.update(rheader)
-        self.current_time = self.parameters['time'] 
-        self.domain_left_edge = np.zeros(3, dtype='float64')
-        self.domain_dimensions = np.ones(3, dtype='int32') * \
-                        2**(self.min_level+1)
-        self.domain_right_edge = np.ones(3, dtype='float64')
-        # This is likely not true, but it's not clear how to determine the boundary conditions
-        self.periodicity = (True, True, True)
-        # These conditions seem to always be true for non-cosmological datasets
-        if rheader["time"] > 0 and rheader["H0"] == 1 and rheader["aexp"] == 1:
-            self.cosmological_simulation = 0
-            self.current_redshift = 0
-            self.hubble_constant = 0
-            self.omega_matter = 0
-            self.omega_lambda = 0
-        else:
-            self.cosmological_simulation = 1
-            self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
-            self.omega_lambda = rheader["omega_l"]
-            self.omega_matter = rheader["omega_m"]
-            self.hubble_constant = rheader["H0"] / 100.0 # This is H100
-        self.max_level = rheader['levelmax'] - self.min_level - 1
-        f.close()
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        if not os.path.basename(args[0]).startswith("info_"): return False
-        fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
-        return os.path.exists(fn)
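
A quick aside on the min_level bookkeeping in _parse_parameter_file above: RAMSES' levelmin is one-indexed and counts oct levels, so yt stores min_level = levelmin - 1 and sizes the root grid as 2**(min_level+1) cells per side. The standalone sketch below (not part of the changeset; the helper name is made up) just restates the arithmetic from the comment in the code:

    import numpy as np

    def root_grid_info(levelmin):
        # Reproduce the arithmetic from RAMSESDataset._parse_parameter_file.
        min_level = levelmin - 1                              # yt is zero-indexed
        domain_dimensions = np.ones(3, "int32") * 2**(min_level + 1)
        n_root_octs = int(np.prod(domain_dimensions)) // 8    # an oct is 2x2x2 cells
        return min_level, domain_dimensions, n_root_octs

    # levelmin=1 -> a 2**3 root grid, i.e. a single oct;
    # levelmin=2 -> a 4**3 root grid, i.e. 8 root octs, as the comment says.
    for levelmin in (1, 2, 3):
        print(root_grid_info(levelmin))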

diff -r 2f08bb95b7f85079b204d2789d85e7a744bf49e5 -r 3e207233e7c01ed0c3a540831f302674a6c0ed47 yt/frontends/ramses/data_structures.py~
--- a/yt/frontends/ramses/data_structures.py~
+++ /dev/null
@@ -1,563 +0,0 @@
-"""
-RAMSES-specific data structures
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import os
-import numpy as np
-import stat
-import weakref
-import cStringIO
-import scipy.integrate
-
-from yt.funcs import *
-from yt.geometry.oct_geometry_handler import \
-    OctreeIndex
-from yt.geometry.geometry_handler import \
-    Index, YTDataChunk
-from yt.data_objects.static_output import \
-    Dataset
-from yt.data_objects.octree_subset import \
-    OctreeSubset
-
-from .definitions import ramses_header, field_aliases
-from yt.utilities.lib.misc_utilities import \
-    get_box_grids_level
-from yt.utilities.io_handler import \
-    io_registry
-from .fields import \
-    RAMSESFieldInfo
-import yt.utilities.fortran_utils as fpu
-from yt.geometry.oct_container import \
-    RAMSESOctreeContainer
-from yt.fields.particle_fields import \
-    standard_particle_fields
-
-class RAMSESDomainFile(object):
-    _last_mask = None
-    _last_selector_id = None
-
-    def __init__(self, ds, domain_id):
-        self.ds = ds
-        self.domain_id = domain_id
-        self.nvar = 0 # Set this later!
-        num = os.path.basename(ds.parameter_filename).split("."
-                )[0].split("_")[1]
-        basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(ds.parameter_filename)),
-            num, domain_id)
-        for t in ['grav', 'hydro', 'part', 'amr']:
-            setattr(self, "%s_fn" % t, basename % t)
-        self._read_amr_header()
-        self._read_hydro_header()
-        self._read_particle_header()
-        self._read_amr()
-
-    _hydro_offset = None
-    _level_count = None
-
-    def __repr__(self):
-        return "RAMSESDomainFile: %i" % self.domain_id
-
-    def _is_hydro(self):
-        '''
-        Does the output include hydro?
-        '''
-        return os.path.exists(self.hydro_fn)
-
-    @property
-    def level_count(self):
-        if self._level_count is not None: return self._level_count
-        self.hydro_offset
-        return self._level_count
-
-    @property
-    def hydro_offset(self):
-        if self._hydro_offset is not None: return self._hydro_offset
-        # We now have to open the file and calculate it
-        f = open(self.hydro_fn, "rb")
-        fpu.skip(f, 6)
-        # It goes: level, CPU, 8-variable
-        min_level = self.ds.min_level
-        n_levels = self.amr_header['nlevelmax'] - min_level
-        hydro_offset = np.zeros(n_levels, dtype='int64')
-        hydro_offset -= 1
-        level_count = np.zeros(n_levels, dtype='int64')
-        skipped = []
-        for level in range(self.amr_header['nlevelmax']):
-            for cpu in range(self.amr_header['nboundary'] +
-                             self.amr_header['ncpu']):
-                header = ( ('file_ilevel', 1, 'I'),
-                           ('file_ncache', 1, 'I') )
-                try:
-                    hvals = fpu.read_attrs(f, header, "=")
-                except AssertionError:
-                    print "You are running with the wrong number of fields."
-                    print "If you specified these in the load command, check the array length."
-                    print "In this file there are %s hydro fields." % skipped
-                    #print "The last set of field sizes was: %s" % skipped
-                    raise
-                if hvals['file_ncache'] == 0: continue
-                assert(hvals['file_ilevel'] == level+1)
-                if cpu + 1 == self.domain_id and level >= min_level:
-                    hydro_offset[level - min_level] = f.tell()
-                    level_count[level - min_level] = hvals['file_ncache']
-                skipped = fpu.skip(f, 8 * self.nvar)
-        self._hydro_offset = hydro_offset
-        self._level_count = level_count
-        return self._hydro_offset
-
-    def _read_hydro_header(self):
-        # If no hydro file is found, return
-        if not self._is_hydro():
-            return
-        if self.nvar > 0: return self.nvar
-        # Read the number of hydro  variables
-        f = open(self.hydro_fn, "rb")
-        fpu.skip(f, 1)
-        self.nvar = fpu.read_vector(f, "i")[0]
-
-    def _read_particle_header(self):
-        if not os.path.exists(self.part_fn):
-            self.local_particle_count = 0
-            self.particle_field_offsets = {}
-            return
-        f = open(self.part_fn, "rb")
-        f.seek(0, os.SEEK_END)
-        flen = f.tell()
-        f.seek(0)
-        hvals = {}
-        attrs = ( ('ncpu', 1, 'I'),
-                  ('ndim', 1, 'I'),
-                  ('npart', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        fpu.read_vector(f, 'I')
-
-        attrs = ( ('nstar_tot', 1, 'I'),
-                  ('mstar_tot', 1, 'd'),
-                  ('mstar_lost', 1, 'd'),
-                  ('nsink', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        self.particle_header = hvals
-        self.local_particle_count = hvals['npart']
-        particle_fields = [
-                ("particle_position_x", "d"),
-                ("particle_position_y", "d"),
-                ("particle_position_z", "d"),
-                ("particle_velocity_x", "d"),
-                ("particle_velocity_y", "d"),
-                ("particle_velocity_z", "d"),
-                ("particle_mass", "d"),
-                ("particle_identifier", "I"),
-                ("particle_refinement_level", "I")]
-        if hvals["nstar_tot"] > 0:
-            particle_fields += [("particle_age", "d"),
-                                ("particle_metallicity", "d")]
-        field_offsets = {}
-        _pfields = {}
-        for field, vtype in particle_fields:
-            if f.tell() >= flen: break
-            field_offsets["io", field] = f.tell()
-            _pfields["io", field] = vtype
-            fpu.skip(f, 1)
-        self.particle_field_offsets = field_offsets
-        self.particle_field_types = _pfields
-        self.particle_types = self.particle_types_raw = ("io",)
-
-    def _read_amr_header(self):
-        hvals = {}
-        f = open(self.amr_fn, "rb")
-        for header in ramses_header(hvals):
-            hvals.update(fpu.read_attrs(f, header))
-        # That's the header, now we skip a few.
-        hvals['numbl'] = np.array(hvals['numbl']).reshape(
-            (hvals['nlevelmax'], hvals['ncpu']))
-        fpu.skip(f)
-        if hvals['nboundary'] > 0:
-            fpu.skip(f, 2)
-            self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
-        else:
-            self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
-        free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
-        ordering = fpu.read_vector(f, 'c')
-        fpu.skip(f, 4)
-        # Now we're at the tree itself
-        # Now we iterate over each level and each CPU.
-        self.amr_header = hvals
-        self.amr_offset = f.tell()
-        self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
-        self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
-
-    def _read_amr(self):
-        """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain 
-           are needed - its position in the octree is found automatically.
-           The most important is finding all the information to feed
-           oct_handler.add
-        """
-        self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
-                self.ds.domain_left_edge, self.ds.domain_right_edge)
-        root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
-        self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
-        fb = open(self.amr_fn, "rb")
-        fb.seek(self.amr_offset)
-        f = cStringIO.StringIO()
-        f.write(fb.read())
-        f.seek(0)
-        mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
-            self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
-        def _ng(c, l):
-            if c < self.amr_header['ncpu']:
-                ng = self.amr_header['numbl'][l, c]
-            else:
-                ng = self.ngridbound[c - self.amr_header['ncpu'] +
-                                self.amr_header['nboundary']*l]
-            return ng
-        min_level = self.ds.min_level
-        max_level = min_level
-        nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
-        for level in range(self.amr_header['nlevelmax']):
-            # Easier if do this 1-indexed
-            for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
-                #ng is the number of octs on this level on this domain
-                ng = _ng(cpu, level)
-                if ng == 0: continue
-                ind = fpu.read_vector(f, "I").astype("int64")
-                fpu.skip(f, 2)
-                pos = np.empty((ng, 3), dtype='float64')
-                pos[:,0] = fpu.read_vector(f, "d") - nx
-                pos[:,1] = fpu.read_vector(f, "d") - ny
-                pos[:,2] = fpu.read_vector(f, "d") - nz
-                #pos *= self.ds.domain_width
-                #pos += self.dataset.domain_left_edge
-                fpu.skip(f, 31)
-                #parents = fpu.read_vector(f, "I")
-                #fpu.skip(f, 6)
-                #children = np.empty((ng, 8), dtype='int64')
-                #for i in range(8):
-                #    children[:,i] = fpu.read_vector(f, "I")
-                #cpu_map = np.empty((ng, 8), dtype="int64")
-                #for i in range(8):
-                #    cpu_map[:,i] = fpu.read_vector(f, "I")
-                #rmap = np.empty((ng, 8), dtype="int64")
-                #for i in range(8):
-                #    rmap[:,i] = fpu.read_vector(f, "I")
-                # We don't want duplicate grids.
-                # Note that we're adding *grids*, not individual cells.
-                if level >= min_level:
-                    assert(pos.shape[0] == ng)
-                    n = self.oct_handler.add(cpu + 1, level - min_level, pos,
-                                count_boundary = 1)
-                    self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
-                    if n > 0: max_level = max(level - min_level, max_level)
-        self.max_level = max_level
-        self.oct_handler.finalize()
-
-    def _error_check(self, cpu, level, pos, n, ng, nn):
-        # NOTE: We have the second conditional here because internally, it will
-        # not add any octs in that case.
-        if n == ng or cpu + 1 > self.oct_handler.num_domains:
-            return
-        # This is where we now check for issues with creating the new octs, and
-        # we attempt to determine what precisely is going wrong.
-        # These are all print statements.
-        print "We have detected an error with the construction of the Octree."
-        print "  The number of Octs to be added :  %s" % ng
-        print "  The number of Octs added       :  %s" % n
-        print "  Level                          :  %s" % level
-        print "  CPU Number (0-indexed)         :  %s" % cpu
-        for i, ax in enumerate('xyz'):
-            print "  extent [%s]                     :  %s %s" % \
-            (ax, pos[:,i].min(), pos[:,i].max())
-        print "  domain left                    :  %s" % \
-            (self.ds.domain_left_edge,)
-        print "  domain right                   :  %s" % \
-            (self.ds.domain_right_edge,)
-        print "  offset applied                 :  %s %s %s" % \
-            (nn[0], nn[1], nn[2])
-        print "AMR Header:"
-        for key in sorted(self.amr_header):
-            print "   %-30s: %s" % (key, self.amr_header[key])
-        raise RuntimeError
-
-    def included(self, selector):
-        if getattr(selector, "domain_id", None) is not None:
-            return selector.domain_id == self.domain_id
-        domain_ids = self.oct_handler.domain_identify(selector)
-        return self.domain_id in domain_ids
-
-class RAMSESDomainSubset(OctreeSubset):
-
-    _domain_offset = 1
-
-    def fill(self, content, fields, selector):
-        # Here we get a copy of the file, which we skip through and read the
-        # bits we want.
-        oct_handler = self.oct_handler
-        all_fields = self.domain.ds.index.fluid_field_list
-        fields = [f for ft, f in fields]
-        tr = {}
-        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
-        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
-            selector, self.domain_id, cell_count)
-        for field in fields:
-            tr[field] = np.zeros(cell_count, 'float64')
-        for level, offset in enumerate(self.domain.hydro_offset):
-            if offset == -1: continue
-            content.seek(offset)
-            nc = self.domain.level_count[level]
-            temp = {}
-            for field in all_fields:
-                temp[field] = np.empty((nc, 8), dtype="float64")
-            for i in range(8):
-                for field in all_fields:
-                    if field not in fields:
-                        fpu.skip(content)
-                    else:
-                        temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
-            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
-        return tr
-
-class RAMSESIndex(OctreeIndex):
-
-    def __init__(self, ds, dataset_type='ramses'):
-        self._ds = ds # TODO: Figure out the class composition better!
-        self.fluid_field_list = ds._fields_in_file
-        self.dataset_type = dataset_type
-        self.dataset = weakref.proxy(ds)
-        self.index_filename = self.dataset.parameter_filename
-        self.directory = os.path.dirname(self.index_filename)
-        self.max_level = None
-
-        self.float_type = np.float64
-        super(RAMSESIndex, self).__init__(ds, dataset_type)
-
-    def _initialize_oct_handler(self):
-        self.domains = [RAMSESDomainFile(self.dataset, i + 1)
-                        for i in range(self.dataset['ncpu'])]
-        total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
-                         for dom in self.domains)
-        self.max_level = max(dom.max_level for dom in self.domains)
-        self.num_grids = total_octs
-
-    def _detect_output_fields(self):
-        # Do we want to attempt to figure out what the fields are in the file?
-        dsl = set([])
-        if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
-            self._setup_auto_fields()
-        for domain in self.domains:
-            dsl.update(set(domain.particle_field_offsets.keys()))
-        self.particle_field_list = list(dsl)
-        self.field_list = [("ramses", f) for f in self.fluid_field_list] \
-                        + self.particle_field_list
-
-    def _setup_auto_fields(self):
-        '''
-        If no fluid fields are set, the code tries to set up a fluids array by hand
-        '''
-        # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
-        # Find nvar
-        
-
-        # TODO: copy/pasted from DomainFile; needs refactoring!
-        num = os.path.basename(self._ds.parameter_filename).split("."
-                )[0].split("_")[1]
-        testdomain = 1 # Just pick the first domain file to read
-        basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(self._ds.parameter_filename)),
-            num, testdomain)
-        hydro_fn = basename % "hydro"
-        # Do we have a hydro file?
-        if not os.path.exists(hydro_fn):
-            self.fluid_field_list = []
-            return
-        # Read the number of hydro  variables
-        f = open(hydro_fn, "rb")
-        hydro_header = ( ('ncpu', 1, 'i'),
-                         ('nvar', 1, 'i'),
-                         ('ndim', 1, 'i'),
-                         ('nlevelmax', 1, 'i'),
-                         ('nboundary', 1, 'i'),
-                         ('gamma', 1, 'd')
-                         )
-        hvals = fpu.read_attrs(f, hydro_header)
-        self.ds.gamma = hvals['gamma']
-        nvar = hvals['nvar']
-        # OK, we got NVAR, now set up the arrays depending on what NVAR is
-        # Allow some wiggle room for users to add too many variables
-        if nvar < 5:
-            mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
-            raise ValueError
-        # Basic hydro runs
-        if nvar == 5:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "Pressure"]
-        if nvar > 5 and nvar < 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "Pressure", "Metallicity"]
-        # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
-        if nvar == 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
-                      "Pressure"]
-        if nvar > 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
-                      "Pressure","Metallicity"]
-        while len(fields) < nvar:
-            fields.append("var"+str(len(fields)))
-        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
-        self.fluid_field_list = fields
-
-    def _identify_base_chunk(self, dobj):
-        if getattr(dobj, "_chunk_info", None) is None:
-            domains = [dom for dom in self.domains if
-                       dom.included(dobj.selector)]
-            base_region = getattr(dobj, "base_region", dobj)
-            if len(domains) > 1:
-                mylog.debug("Identified %s intersecting domains", len(domains))
-            subsets = [RAMSESDomainSubset(base_region, domain, self.dataset)
-                       for domain in domains]
-            dobj._chunk_info = subsets
-        dobj._current_chunk = list(self._chunk_all(dobj))[0]
-
-    def _chunk_all(self, dobj):
-        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, None)
-
-    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
-        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for i,og in enumerate(sobjs):
-            if ngz > 0:
-                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
-            else:
-                g = og
-            yield YTDataChunk(dobj, "spatial", [g], None)
-
-    def _chunk_io(self, dobj, cache = True, local_only = False):
-        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
-
-class RAMSESDataset(Dataset):
-    _index_class = RAMSESIndex
-    _field_info_class = RAMSESFieldInfo
-    gamma = 1.4 # This will get replaced on hydro_fn open
-    
-    def __init__(self, filename, dataset_type='ramses',
-                 fields = None, storage_filename = None):
-        # Here we want to initiate a traceback, if the reader is not built.
-        if isinstance(fields, types.StringTypes):
-            fields = field_aliases[fields]
-        '''
-        fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
-                If set to None, will try a default set of fields
-        '''
-        self.fluid_types += ("ramses",)
-        self._fields_in_file = fields
-        Dataset.__init__(self, filename, dataset_type)
-        self.storage_filename = storage_filename
-
-    def __repr__(self):
-        return self.basename.rsplit(".", 1)[0]
-
-    def _set_code_unit_attributes(self):
-        """
-        Generates the conversion to various physical _units based on the parameter file
-        """
-        # Note that unit_l *already* converts to proper!
-        # Also note that unit_l must be multiplied by the boxlen parameter to
-        # ensure we are correctly set up for the current domain.
-        length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
-        rho_u = self.parameters['unit_d']/ self.parameters['boxlen']**3
-        # We're not multiplying by the boxlength here.
-        mass_unit = rho_u * self.parameters['unit_l']**3
-        # Cosmological runs are done in lookback conformal time. 
-        #To convert to proper time, the time unit is calculated from 
-        # the expansion factor.
-        time_unit = self.parameters['unit_t']
-        magnetic_unit = np.sqrt(4*np.pi * mass_unit /
-                                (time_unit**2 * length_unit))
-        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
-        self.length_unit = self.quan(length_unit, "cm")
-        self.mass_unit = self.quan(mass_unit, "g")
-        self.time_unit = self.quan(time_unit, "s")
-        self.velocity_unit = self.length_unit / self.time_unit
-
-    def _parse_parameter_file(self):
-        # hardcoded for now
-        # These should be explicitly obtained from the file, but for now that
-        # will wait until a reorganization of the source tree and better
-        # generalization.
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = 'ramses'
-        self.parameters["Time"] = 1. # default unit is 1...
-
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        # We now execute the same logic Oliver's code does
-        rheader = {}
-        f = open(self.parameter_filename)
-        def read_rhs(cast):
-            line = f.readline()
-            p, v = line.split("=")
-            rheader[p.strip()] = cast(v)
-        for i in range(6): read_rhs(int)
-        f.readline()
-        for i in range(11): read_rhs(float)
-        f.readline()
-        read_rhs(str)
-        # This next line deserves some comment.  We specify a min_level that
-        # corresponds to the minimum level in the RAMSES simulation.  RAMSES is
-        # one-indexed, but it also does refer to the *oct* dimensions -- so
-        # this means that a levelmin of 1 would have *1* oct in it.  So a
-        # levelmin of 2 would have 8 octs at the root mesh level.
-        self.min_level = rheader['levelmin'] - 1
-        # Now we read the hilbert indices
-        self.hilbert_indices = {}
-        if rheader['ordering type'] == "hilbert":
-            f.readline() # header
-            for n in range(rheader['ncpu']):
-                dom, mi, ma = f.readline().split()
-                self.hilbert_indices[int(dom)] = (float(mi), float(ma))
-        self.parameters.update(rheader)
-        self.current_time = self.parameters['time'] 
-        self.domain_left_edge = np.zeros(3, dtype='float64')
-        self.domain_dimensions = np.ones(3, dtype='int32') * \
-                        2**(self.min_level+1)
-        self.domain_right_edge = np.ones(3, dtype='float64')
-        # This is likely not true, but I am not sure how to otherwise
-        # distinguish them.
-        mylog.warning("RAMSES frontend assumes all simulations are cosmological!")
-        self.cosmological_simulation = 1
-        self.periodicity = (True, True, True)
-        self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
-        self.omega_lambda = rheader["omega_l"]
-        self.omega_matter = rheader["omega_m"]
-        self.hubble_constant = rheader["H0"] / 100.0 # This is H100
-        self.max_level = rheader['levelmax'] - self.min_level
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        if not os.path.basename(args[0]).startswith("info_"): return False
-        fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
-        return os.path.exists(fn)
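
A side note on the magnetic_unit expression in _set_code_unit_attributes above: in CGS, sqrt(4*pi*mass/(time**2*length)) carries units of g**0.5 cm**-0.5 s**-1, i.e. gauss, and is algebraically the same as sqrt(4*pi*density)*velocity. The check below is a standalone sketch with made-up numbers standing in for unit_l, unit_d and unit_t; it deliberately sidesteps the question of where boxlen enters, which the two versions of this method in this diff handle differently:

    import numpy as np

    # Hypothetical CGS code units, standing in for the unit_l, unit_d, unit_t
    # values read from a RAMSES info file.
    length_unit = 3.0857e24        # cm
    density_unit = 6.77e-23        # g/cm**3
    time_unit = 2.53e15            # s

    mass_unit = density_unit * length_unit**3
    velocity_unit = length_unit / time_unit

    b1 = np.sqrt(4 * np.pi * mass_unit / (time_unit**2 * length_unit))
    b2 = np.sqrt(4 * np.pi * density_unit) * velocity_unit

    assert np.isclose(b1, b2)      # both give the field strength in gauss
    print(b1, "gauss")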

diff -r 2f08bb95b7f85079b204d2789d85e7a744bf49e5 -r 3e207233e7c01ed0c3a540831f302674a6c0ed47 yt/geometry/#oct_geometry_handler.py#
--- a/yt/geometry/#oct_geometry_handler.py#
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Octree geometry handler
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-import string, re, gc, time, cPickle
-import weakref
-
-from itertools import chain, izip
-
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.arraytypes import blankRecordArray
-from yt.config import ytcfg
-from yt.fields.field_info_container import NullFunc
-from yt.geometry.geometry_handler import Index, YTDataChunk
-from yt.utilities.definitions import MAXLEVEL
-from yt.utilities.io_handler import io_registry
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-
-from yt.data_objects.data_containers import data_object_registry
-
-class OctreeIndex(Index):
-    """The Index subclass for oct AMR datasets"""
-    def _setup_geometry(self):
-        mylog.debug("Initializing Octree Geometry Handler.")
-        self._initialize_oct_handler()
-
-    def get_smallest_dx(self):
-        """
-        Returns (in code units) the smallest cell size in the simulation.
-        """
-        return (self.dataset.domain_width /
-                (self.dataset.domain_dimensions * 2**(self.max_level))).min()
->>>>>>> other
-
-    def convert(self, unit):
-        return self.dataset.conversion_factors[unit]

diff -r 2f08bb95b7f85079b204d2789d85e7a744bf49e5 -r 3e207233e7c01ed0c3a540831f302674a6c0ed47 yt/geometry/oct_geometry_handler.py.orig
--- a/yt/geometry/oct_geometry_handler.py.orig
+++ /dev/null
@@ -1,55 +0,0 @@
-"""
-Octree geometry handler
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-import string, re, gc, time, cPickle
-import weakref
-
-from itertools import chain, izip
-
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.arraytypes import blankRecordArray
-from yt.config import ytcfg
-from yt.fields.field_info_container import NullFunc
-from yt.geometry.geometry_handler import Index, YTDataChunk
-from yt.utilities.definitions import MAXLEVEL
-from yt.utilities.io_handler import io_registry
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-
-from yt.data_objects.data_containers import data_object_registry
-
-class OctreeIndex(Index):
-    """The Index subclass for oct AMR datasets"""
-    def _setup_geometry(self):
-        mylog.debug("Initializing Octree Geometry Handler.")
-        self._initialize_oct_handler()
-
-    def get_smallest_dx(self):
-        """
-        Returns (in code units) the smallest cell size in the simulation.
-        """
-        return (self.dataset.domain_width /
-<<<<<<< local
-                (2**(self.dataset.max_level+1))).min()
-=======
-                (self.dataset.domain_dimensions * 2**(self.max_level))).min()
->>>>>>> other
-
-    def convert(self, unit):
-        return self.dataset.conversion_factors[unit]

diff -r 2f08bb95b7f85079b204d2789d85e7a744bf49e5 -r 3e207233e7c01ed0c3a540831f302674a6c0ed47 yt/geometry/oct_geometry_handler.py~
--- a/yt/geometry/oct_geometry_handler.py~
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Octree geometry handler
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-import string, re, gc, time, cPickle
-import weakref
-
-from itertools import chain, izip
-
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.arraytypes import blankRecordArray
-from yt.config import ytcfg
-from yt.fields.field_info_container import NullFunc
-from yt.geometry.geometry_handler import Index, YTDataChunk
-from yt.utilities.definitions import MAXLEVEL
-from yt.utilities.io_handler import io_registry
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-
-from yt.data_objects.data_containers import data_object_registry
-
-class OctreeIndex(Index):
-    """The Index subclass for oct AMR datasets"""
-    def _setup_geometry(self):
-        mylog.debug("Initializing Octree Geometry Handler.")
-        self._initialize_oct_handler()
-
-    def get_smallest_dx(self):
-        """
-        Returns (in code units) the smallest cell size in the simulation.
-        """
-        return (self.dataset.domain_width /
-                (2**(self.max_level+1))).min()
-
-    def convert(self, unit):
-        return self.dataset.conversion_factors[unit]

This diff is so big that we needed to truncate the remainder.
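
The two oct_geometry_handler variants above disagree on get_smallest_dx: one divides the domain width by 2**(max_level+1), the other by domain_dimensions * 2**max_level. Purely to make the arithmetic concrete (and assuming max_level means the same quantity in both expressions, which the truncated diff does not settle), a standalone comparison:

    import numpy as np

    def dx_local(domain_width, max_level):
        # "local" variant: ignores the root-grid dimensions
        return (domain_width / 2**(max_level + 1)).min()

    def dx_other(domain_width, domain_dimensions, max_level):
        # "other" variant: scale by the root grid, then by the refinement
        return (domain_width / (domain_dimensions * 2**max_level)).min()

    domain_width = np.ones(3)                      # code units
    for levelmin in (1, 2):                        # RAMSES root-grid level
        dims = np.ones(3, "int64") * 2**levelmin   # domain_dimensions per side
        for max_level in (0, 3):
            print(levelmin, max_level,
                  dx_local(domain_width, max_level),
                  dx_other(domain_width, dims, max_level))
    # The two expressions only agree when the root grid is 2**3 cells (levelmin == 1).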

https://bitbucket.org/yt_analysis/yt/commits/b6bdd998975d/
Changeset:   b6bdd998975d
Branch:      yt
User:        RicardaBeckmann
Date:        2015-06-05 23:36:51+00:00
Summary:     updated hgignore again
Affected #:  1 file

diff -r 3e207233e7c01ed0c3a540831f302674a6c0ed47 -r b6bdd998975ddb1059fbf0361aa349dc464f78c7 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -66,3 +66,4 @@
 *~
 *#
 #*
+*.orig


https://bitbucket.org/yt_analysis/yt/commits/ac9b389726d8/
Changeset:   ac9b389726d8
Branch:      yt
User:        RicardaBeckmann
Date:        2015-06-06 13:14:16+00:00
Summary:     deleted accidental .c files, updated hgignore
Affected #:  3 files

diff -r b6bdd998975ddb1059fbf0361aa349dc464f78c7 -r ac9b389726d84d1ad045f033400fca87cb1b77da .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -11,6 +11,7 @@
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/analysis_modules/ppv_cube/ppv_utils.c
 yt/frontends/ramses/_ramses_reader.cpp
+yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c
 yt/geometry/grid_container.c
 yt/geometry/grid_visitors.c
@@ -40,6 +41,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
+yt/utilities/lib/GridTree.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
@@ -66,4 +68,3 @@
 *~
 *#
 #*
-*.orig

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/0956020c5cfb/
Changeset:   0956020c5cfb
Branch:      yt
User:        RicardaBeckmann
Date:        2015-06-07 10:21:01+00:00
Summary:     updated hgignore again
Affected #:  1 file

diff -r ac9b389726d84d1ad045f033400fca87cb1b77da -r 0956020c5cfb818058225264d06eb1f8d6d33a9a .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -65,6 +65,4 @@
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
 dist
-*~
-*#
-#*
+


https://bitbucket.org/yt_analysis/yt/commits/f66a7bff9387/
Changeset:   f66a7bff9387
Branch:      yt
User:        RicardaBeckmann
Date:        2015-06-07 10:26:19+00:00
Summary:     .hgignore edited online with Bitbucket
Affected #:  1 file

diff -r 0956020c5cfb818058225264d06eb1f8d6d33a9a -r f66a7bff938778f6b68b964a36928df5a3dd009f .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -65,4 +65,3 @@
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
 dist
-


https://bitbucket.org/yt_analysis/yt/commits/e6a1ea59160c/
Changeset:   e6a1ea59160c
Branch:      yt
User:        brittonsmith
Date:        2015-04-30 15:35:45+00:00
Summary:     Adding most of gadget_fof frontend.
Affected #:  12 files

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -27,6 +27,7 @@
     'fits',
     'flash',
     'gadget',
+    'gadget_fof',
     'gdf',
     'halo_catalog',
     'http_stream',

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -387,7 +387,7 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
         need_groups = ['Header']
-        veto_groups = ['FOF']
+        veto_groups = ['FOF', 'Group', 'Subhalo']
         valid = True
         try:
             fh = h5py.File(args[0], mode='r')
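
For context on the veto_groups change just above: the _is_valid check accepts an HDF5 file only if it contains the required groups and none of the veto groups, so adding 'Group' and 'Subhalo' stops the Gadget snapshot frontend from claiming FOF/SUBFIND halo catalogs, which the new gadget_fof frontend introduced in this changeset is meant to pick up instead. A schematic standalone version of that kind of check (not the actual yt method, whose full body is not shown in this diff):

    import h5py

    def looks_like_gadget_snapshot(filename,
                                   need_groups=('Header',),
                                   veto_groups=('FOF', 'Group', 'Subhalo')):
        # Accept only HDF5 files with every required group and no veto group.
        try:
            with h5py.File(filename, mode='r') as fh:
                keys = set(fh.keys())
        except (IOError, OSError):
            return False
        if not all(g in keys for g in need_groups):
            return False
        if any(g in keys for g in veto_groups):
            return False    # e.g. a gadget_fof halo catalog
        return True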

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/gadget_fof/__init__.py
--- /dev/null
+++ b/yt/frontends/gadget_fof/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for GadgetFOF frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/gadget_fof/api.py
--- /dev/null
+++ b/yt/frontends/gadget_fof/api.py
@@ -0,0 +1,26 @@
+"""
+API for GadgetFOF frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+     GadgetFOFDataset
+
+from .io import \
+     IOHandlerGadgetFOFHDF5
+
+from .fields import \
+     GadgetFOFFieldInfo
+
+from . import tests

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/gadget_fof/data_structures.py
--- /dev/null
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -0,0 +1,243 @@
+"""
+Data structures for GadgetFOF frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from collections import defaultdict
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+    GadgetFOFFieldInfo
+
+from yt.utilities.cosmology import \
+    Cosmology
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.exceptions import \
+    YTException
+from yt.utilities.logger import ytLogger as \
+    mylog
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, \
+    ParticleFile
+from yt.frontends.gadget.data_structures import \
+    _fix_unit_ordering
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+
+class GadgetFOFParticleIndex(ParticleIndex):
+    def __init__(self, ds, dataset_type):
+        super(GadgetFOFParticleIndex, self).__init__(ds, dataset_type)
+
+    def _calculate_particle_index_starts(self):
+        # Halo indices are not saved in the file, so we must count by hand.
+        # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
+        particle_count = defaultdict(int)
+        offset_count = 0
+        for data_file in self.data_files:
+            data_file.index_start = dict([(ptype, particle_count[ptype]) for
+                                           ptype in data_file.total_particles])
+            data_file.offset_start = offset_count
+            for ptype in data_file.total_particles:
+                particle_count[ptype] += data_file.total_particles[ptype]
+            offset_count += data_file.total_offset
+
+    def _calculate_file_offset_map(self):
+        # After the FOF  is performed, a load-balancing step redistributes halos 
+        # and then writes more fields.  Here, for each file, we create a list of 
+        # files which contain the rest of the redistributed particles.
+        ifof = np.array([data_file.total_particles["Group"]
+                         for data_file in self.data_files])
+        isub = np.array([data_file.total_offset
+                         for data_file in self.data_files])
+        subend = isub.cumsum()
+        fofend = ifof.cumsum()
+        istart = np.digitize(fofend - ifof, subend - isub) - 1
+        iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
+        for i, data_file in enumerate(self.data_files):
+            data_file.offset_files = self.data_files[istart[i]: iend[i] + 1]
+
+    def _detect_output_fields(self):
+        # TODO: Add additional fields
+        dsl = []
+        units = {}
+        for dom in self.data_files:
+            fl, _units = self.io._identify_fields(dom)
+            units.update(_units)
+            dom._calculate_offsets(fl)
+            for f in fl:
+                if f not in dsl: dsl.append(f)
+        self.field_list = dsl
+        ds = self.dataset
+        ds.particle_types = tuple(set(pt for pt, ds in dsl))
+        # This is an attribute that means these particle types *actually*
+        # exist.  As in, they are real, in the dataset.
+        ds.field_units.update(units)
+        ds.particle_types_raw = ds.particle_types
+            
+    def _setup_geometry(self):
+        super(GadgetFOFParticleIndex, self)._setup_geometry()
+        self._calculate_particle_index_starts()
+        self._calculate_file_offset_map()
+    
+class GadgetFOFHDF5File(ParticleFile):
+    def __init__(self, ds, io, filename, file_id):
+        super(GadgetFOFHDF5File, self).__init__(ds, io, filename, file_id)
+        with h5py.File(filename, "r") as f:
+            self.header = dict((field, f.attrs[field]) \
+                               for field in f.attrs.keys())
+    
+class GadgetFOFDataset(Dataset):
+    _index_class = GadgetFOFParticleIndex
+    _file_class = GadgetFOFHDF5File
+    _field_info_class = GadgetFOFFieldInfo
+    _suffix = ".hdf5"
+
+    def __init__(self, filename, dataset_type="gadget_fof_hdf5",
+                 n_ref=16, over_refine_factor=1,
+                 unit_base=None, units_override=None):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        if unit_base is not None and "UnitLength_in_cm" in unit_base:
+            # We assume this is comoving, because in the absence of comoving
+            # integration the redshift will be zero.
+            unit_base['cmcm'] = 1.0 / unit_base["UnitLength_in_cm"]
+        self._unit_base = unit_base
+        if units_override is not None:
+            raise RuntimeError("units_override is not supported for GadgetFOFDataset. "+
+                               "Use unit_base instead.")
+        super(GadgetFOFDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
+
+    def _parse_parameter_file(self):
+        handle = h5py.File(self.parameter_filename, mode="r")
+        hvals = {}
+        hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+        hvals["NumFiles"] = hvals["NumFiles"]
+
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        # Set standard values
+        self.domain_left_edge = np.zeros(3, "float64")
+        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
+        self.current_redshift = hvals["Redshift"]
+        self.omega_lambda = hvals["OmegaLambda"]
+        self.omega_matter = hvals["Omega0"]
+        self.hubble_constant = hvals["HubbleParam"]
+
+        cosmology = Cosmology(hubble_constant=self.hubble_constant,
+                              omega_matter=self.omega_matter,
+                              omega_lambda=self.omega_lambda)
+        self.current_time = cosmology.t_from_z(self.current_redshift)
+
+        self.parameters = hvals
+        prefix = os.path.abspath(
+            os.path.join(os.path.dirname(self.parameter_filename), 
+                         os.path.basename(self.parameter_filename).split(".", 1)[0]))
+        
+        suffix = self.parameter_filename.rsplit(".", 1)[-1]
+        self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
+        self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+        if self.file_count == 0:
+            raise YTException(message="No data files found.", ds=self)
+        self.particle_types = ("Group", "Subhalo")
+        self.particle_types_raw = ("Group", "Subhalo")
+        
+        handle.close()
+
+    def _set_code_unit_attributes(self):
+        # Set a sane default for cosmological simulations.
+        if self._unit_base is None and self.cosmological_simulation == 1:
+            mylog.info("Assuming length units are in Mpc/h (comoving)")
+            self._unit_base = dict(length = (1.0, "Mpccm/h"))
+        # For the remaining units, fall back to the standard Gadget
+        # defaults.
+        unit_base = self._unit_base or {}
+        
+        if "length" in unit_base:
+            length_unit = unit_base["length"]
+        elif "UnitLength_in_cm" in unit_base:
+            if self.cosmological_simulation == 0:
+                length_unit = (unit_base["UnitLength_in_cm"], "cm")
+            else:
+                length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
+        else:
+            raise RuntimeError
+        length_unit = _fix_unit_ordering(length_unit)
+        self.length_unit = self.quan(length_unit[0], length_unit[1])
+        
+        if "velocity" in unit_base:
+            velocity_unit = unit_base["velocity"]
+        elif "UnitVelocity_in_cm_per_s" in unit_base:
+            velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
+        else:
+            velocity_unit = (1e5, "cm/s")
+        velocity_unit = _fix_unit_ordering(velocity_unit)
+        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+
+        # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
+        # Default to 1e10 Msun/h if mass is not specified.
+        if "mass" in unit_base:
+            mass_unit = unit_base["mass"]
+        elif "UnitMass_in_g" in unit_base:
+            if self.cosmological_simulation == 0:
+                mass_unit = (unit_base["UnitMass_in_g"], "g")
+            else:
+                mass_unit = (unit_base["UnitMass_in_g"], "g/h")
+        else:
+            # Sane default
+            mass_unit = (1.0, "1e10*Msun/h")
+        mass_unit = _fix_unit_ordering(mass_unit)
+        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+
+        if "time" in unit_base:
+            time_unit = unit_base["time"]
+        elif "UnitTime_in_s" in unit_base:
+            time_unit = (unit_base["UnitTime_in_s"], "s")
+        else:
+            time_unit = (1., "s")        
+        self.time_unit = self.quan(time_unit[0], time_unit[1])
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        need_groups = ['Group', 'Header', 'Subhalo']
+        veto_groups = ['FOF']
+        valid = True
+        try:
+            fh = h5py.File(args[0], mode='r')
+            valid = all(ng in fh["/"] for ng in need_groups) and \
+              not any(vg in fh["/"] for vg in veto_groups)
+            fh.close()
+        except Exception:
+            valid = False
+        return valid
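
The cumulative bookkeeping in _calculate_particle_index_starts and the
digitize-based mapping in _calculate_file_offset_map above can be opaque on a
first read.  The following standalone sketch shows the idea with made-up
per-file counts (the numbers and the simplified clip bound are illustrative
only, not taken from any dataset):

    import numpy as np

    # Hypothetical per-file counts: FOF groups written with each file, and
    # "offset" entries written later by the load-balancing step.
    ifof = np.array([4, 3, 5])   # FOF groups per file
    isub = np.array([6, 4, 2])   # redistributed entries per file

    fofend = ifof.cumsum()       # global end index of each file's FOF range
    subend = isub.cumsum()       # global end index of each offset range

    # First and last offset files whose ranges overlap each file's FOF range.
    istart = np.digitize(fofend - ifof, subend - isub) - 1
    iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 1)

    for i in range(ifof.size):
        print("file %d reads offset files %d through %d" % (i, istart[i], iend[i]))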

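The unit logic in _set_code_unit_attributes above means the defaults can be
overridden at load time through the unit_base keyword accepted by the
constructor.  A hedged sketch, assuming yt.load forwards the keyword as it
does for the other Gadget frontends (the file path and numerical values are
purely illustrative):

    import yt

    ds = yt.load("fof_subhalo_tab_042.0.hdf5",  # hypothetical catalog path
                 unit_base={"UnitLength_in_cm": 3.085678e24,     # 1 Mpc
                            "UnitVelocity_in_cm_per_s": 1.0e5,   # 1 km/s
                            "UnitMass_in_g": 1.989e43})          # ~1e10 Msun
    print("%s %s %s" % (ds.length_unit, ds.mass_unit, ds.velocity_unit))
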
diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/gadget_fof/fields.py
--- /dev/null
+++ b/yt/frontends/gadget_fof/fields.py
@@ -0,0 +1,40 @@
+"""
+GadgetFOF-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+from yt.units.yt_array import \
+    YTArray
+
+m_units = "code_mass"
+mdot_units = "code_mass / code_time"
+p_units = "Mpccm/h"
+v_units = "1e5 * cmcm / s"
+
+class GadgetFOFFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+        ("Pos_0", (p_units, ["particle_position_x"], None)),
+        ("Pos_1", (p_units, ["particle_position_y"], None)),
+        ("Pos_2", (p_units, ["particle_position_z"], None)),
+        ("Vel_0", (v_units, ["particle_velocity_x"], None)),
+        ("Vel_1", (v_units, ["particle_velocity_y"], None)),
+        ("Vel_2", (v_units, ["particle_velocity_z"], None)),
+        ("Mass", (m_units, ["particle_mass"], None)),
+)

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/gadget_fof/io.py
--- /dev/null
+++ b/yt/frontends/gadget_fof/io.py
@@ -0,0 +1,210 @@
+"""
+GadgetFOF data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import mylog
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import compute_morton
+
+class IOHandlerGadgetFOFHDF5(BaseIOHandler):
+    _dataset_type = "gadget_fof_hdf5"
+
+    def __init__(self, ds):
+        super(IOHandlerGadgetFOFHDF5, self).__init__(ds)
+        self.offset_fields = set([])
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    pcount = data_file.total_particles[ptype]
+                    coords = f[ptype]["%sPos" % ptype].value.astype("float64")
+                    coords = np.resize(coords, (pcount, 3))
+                    x = coords[:, 0]
+                    y = coords[:, 1]
+                    z = coords[:, 2]
+                    yield ptype, (x, y, z)
+
+    def _read_offset_particle_field(self, field, data_file, fh):
+        field_data = np.empty(data_file.total_particles["Group"], dtype="float64")
+        fofindex = np.arange(data_file.total_particles["Group"]) + data_file.index_start["FOF"]
+        for offset_file in data_file.offset_files:
+            if fh.filename == offset_file.filename:
+                ofh = fh
+            else:
+                ofh = h5py.File(offset_file.filename, "r")
+            subindex = np.arange(offset_file.total_offset) + offset_file.offset_start
+            substart = max(fofindex[0] - subindex[0], 0)
+            subend = min(fofindex[-1] - subindex[0], subindex.size - 1)
+            fofstart = substart + subindex[0] - fofindex[0]
+            fofend = subend + subindex[0] - fofindex[0]
+            field_data[fofstart:fofend + 1] = ofh["Subhalo"][field][substart:subend + 1]
+        return field_data
+                    
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    pcount = data_file.total_particles[ptype]
+                    if pcount == 0: continue
+                    coords = f[ptype]["%sPos" % ptype].value.astype("float64")
+                    coords = np.resize(coords, (pcount, 3))
+                    x = coords[:, 0]
+                    y = coords[:, 1]
+                    z = coords[:, 2]
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        if field in self.offset_fields:
+                            field_data = \
+                              self._read_offset_particle_field(field, data_file, f)
+                        else:
+                            if field == "particle_identifier":
+                                field_data = \
+                                  np.arange(data_file.total_particles[ptype]) + \
+                                  data_file.index_start[ptype]
+                            elif field in f[ptype]:
+                                field_data = f[ptype][field].value.astype("float64")
+                            else:
+                                fname = field[:field.rfind("_")]
+                                field_data = f[ptype][fname].value.astype("float64")
+                                my_div = field_data.size / pcount
+                                if my_div > 1:
+                                    field_data = np.resize(field_data, (pcount, my_div))
+                                    findex = int(field[field.rfind("_") + 1:])
+                                    field_data = field_data[:, findex]
+                        data = field_data[mask]
+                        yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        pcount = sum(data_file.total_particles.values())
+        morton = np.empty(pcount, dtype='uint64')
+        if pcount == 0: return morton
+        mylog.debug("Initializing index % 5i (% 7i particles)",
+                    data_file.file_id, pcount)
+        ind = 0
+        with h5py.File(data_file.filename, "r") as f:
+            if not f.keys(): return None
+            dx = np.finfo(f["Group"]["GroupPos"].dtype).eps
+            dx = 2.0*self.ds.quan(dx, "code_length")
+
+            for ptype in data_file.ds.particle_types_raw:
+                if data_file.total_particles[ptype] == 0: continue
+                pos = f[ptype]["%sPos" % ptype].value.astype("float64")
+                pos = np.resize(pos, (data_file.total_particles[ptype], 3))
+                pos = data_file.ds.arr(pos, "code_length")
+                
+                # These are 32 bit numbers, so we give a little lee-way.
+                # Otherwise, for big sets of particles, we often will bump into the
+                # domain edges.  This helps alleviate that.
+                np.clip(pos, self.ds.domain_left_edge + dx,
+                             self.ds.domain_right_edge - dx, pos)
+                if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+                   np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                    raise YTDomainOverflow(pos.min(axis=0),
+                                           pos.max(axis=0),
+                                           self.ds.domain_left_edge,
+                                           self.ds.domain_right_edge)
+                regions.add_data_file(pos, data_file.file_id)
+                morton[ind:ind+pos.shape[0]] = compute_morton(
+                    pos[:,0], pos[:,1], pos[:,2],
+                    data_file.ds.domain_left_edge,
+                    data_file.ds.domain_right_edge)
+                ind += pos.shape[0]
+        return morton
+
+    def _count_particles(self, data_file):
+        with h5py.File(data_file.filename, "r") as f:
+            pcount = {"Group": f["Header"].attrs["Ngroups_ThisFile"],
+                      "Subhalo": f["Header"].attrs["Nsubgroups_ThisFile"]}
+            data_file.total_offset = 0 # need to figure out how subfind works here
+            return pcount
+
+    def _identify_fields(self, data_file):
+        fields = []
+        pcount = data_file.total_particles
+        if sum(pcount.values()) == 0: return fields, {}
+        with h5py.File(data_file.filename, "r") as f:
+            for ptype in self.ds.particle_types_raw:
+                if data_file.total_particles[ptype] == 0: continue
+                fields.append((ptype, "particle_identifier"))
+                # my_fields, my_offset_fields = \
+                #   subfind_field_list(f[ptype], ptype, data_file.total_particles)
+                # fields.extend(my_fields)
+                # self.offset_fields = self.offset_fields.union(set(my_offset_fields))
+        return fields, {}
+
+def subfind_field_list(fh, ptype, pcount):
+    fields = []
+    offset_fields = []
+    for field in fh.keys():
+        if "PartType" in field:
+            # These are halo member particles
+            continue
+        elif isinstance(fh[field], h5py.Group):
+            my_fields, my_offset_fields = \
+              subfind_field_list(fh[field], ptype, pcount)
+            fields.extend(my_fields)
+            offset_fields.extend(my_offset_fields)
+        else:
+            if not fh[field].size % pcount[ptype]:
+                my_div = fh[field].size / pcount[ptype]
+                fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+                if my_div > 1:
+                    for i in range(my_div):
+                        fields.append((ptype, "%s_%d" % (fname, i)))
+                else:
+                    fields.append((ptype, fname))
+            elif ptype == "SUBFIND" and \
+              not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
+                # These are actually FOF fields, but they were written after 
+                # a load balancing step moved halos around and thus they do not
+                # correspond to the halos stored in the FOF group.
+                my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
+                fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+                if my_div > 1:
+                    for i in range(my_div):
+                        fields.append(("FOF", "%s_%d" % (fname, i)))
+                else:
+                    fields.append(("FOF", fname))
+                offset_fields.append(fname)
+            else:
+                mylog.warn("Cannot add field (%s, %s) with size %d." % \
+                           (ptype, fh[field].name, fh[field].size))
+                continue
+    return fields, offset_fields
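
The index arithmetic in _read_offset_particle_field above is essentially an
interval intersection between the FOF range of one file and the range covered
by each offset file.  A standalone sketch with made-up global index ranges:

    import numpy as np

    fofindex = np.arange(10) + 25   # this file holds FOF groups 25..34
    subindex = np.arange(8) + 30    # one offset file covers entries 30..37

    substart = max(fofindex[0] - subindex[0], 0)                  # 0
    subend = min(fofindex[-1] - subindex[0], subindex.size - 1)   # 4
    fofstart = substart + subindex[0] - fofindex[0]               # 5
    fofend = subend + subindex[0] - fofindex[0]                   # 9
    # field_data[5:10] would be filled from the offset file's entries 0:5,
    # i.e. the overlapping global indices 30..34 on both sides.
    print("%d %d %d %d" % (substart, subend, fofstart, fofend))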

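The my_div logic in _read_particle_fields and subfind_field_list above exposes
flat, multi-component datasets as separate fields named with an underscore and
a component index.  A small sketch of that convention (the array contents and
the "GroupPos" name are made up):

    import numpy as np

    pcount = 5                             # halos in a hypothetical file
    raw = np.arange(15, dtype="float64")   # a flat block of 5 halos x 3 components

    my_div = raw.size // pcount            # 3 components per halo
    data = np.resize(raw, (pcount, my_div))
    for i in range(my_div):
        print("GroupPos_%d: %s" % (i, data[:, i]))
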
diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/gadget_fof/setup.py
--- /dev/null
+++ b/yt/frontends/gadget_fof/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('gadget_fof', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/gadget_fof/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -0,0 +1,50 @@
+"""
+GadgetFOF frontend tests using owls_fof_halos datasets
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os.path
+from yt.testing import \
+    assert_equal
+from yt.utilities.answer_testing.framework import \
+    FieldValuesTest, \
+    requires_ds, \
+    data_dir_load
+from yt.frontends.gadget_fof.api import GadgetFOFDataset
+
+_fields = ("particle_position_x", "particle_position_y",
+           "particle_position_z", "particle_mass")
+
+# a dataset with empty files
+g1 = "" # TBD
+g8 = "" # TBD
+
+
+@requires_ds(g8)
+def test_fields_g8():
+    ds = data_dir_load(g8)
+    yield assert_equal, str(ds), os.path.basename(g8)
+    for field in _fields:
+        yield FieldValuesTest(g8, field, particle_type=True)
+
+
+@requires_ds(g1)
+def test_fields_g1():
+    ds = data_dir_load(g1)
+    yield assert_equal, str(ds), os.path.basename(g1)
+    for field in _fields:
+        yield FieldValuesTest(g1, field, particle_type=True)
+
+@requires_file(g1)
+def test_GadgetFOFDataset():
+    assert isinstance(data_dir_load(g1), GadgetFOFDataset)
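
The tests above use nose's generator ("yield") pattern: each tuple yielded
from a test function is collected and run as a separate test case.  A minimal,
self-contained illustration of the pattern, not tied to any dataset:

    from yt.testing import assert_equal

    def test_yield_pattern():
        # nose calls each yielded (callable, args...) tuple as its own test
        for expected, actual in [(1, 1), (2, 2)]:
            yield assert_equal, expected, actual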

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -27,11 +27,14 @@
 from .fields import \
     OWLSSubfindFieldInfo
 
-from yt.utilities.cosmology import Cosmology
+from yt.utilities.cosmology import \
+    Cosmology
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from yt.utilities.exceptions import \
-     YTException
+    YTException
+from yt.utilities.logger import ytLogger as \
+     mylog
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 from yt.data_objects.static_output import \
@@ -170,6 +173,7 @@
         # The other same defaults we will use from the standard Gadget
         # defaults.
         unit_base = self._unit_base or {}
+
         if "length" in unit_base:
             length_unit = unit_base["length"]
         elif "UnitLength_in_cm" in unit_base:
@@ -182,7 +186,6 @@
         length_unit = _fix_unit_ordering(length_unit)
         self.length_unit = self.quan(length_unit[0], length_unit[1])
 
-        unit_base = self._unit_base or {}
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
         elif "UnitVelocity_in_cm_per_s" in unit_base:
@@ -191,6 +194,7 @@
             velocity_unit = (1e5, "cm/s")
         velocity_unit = _fix_unit_ordering(velocity_unit)
         self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+
         # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
         # Default to 1e10 Msun/h if mass is not specified.
         if "mass" in unit_base:
@@ -205,7 +209,14 @@
             mass_unit = (1.0, "1e10*Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
         self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
-        self.time_unit = self.quan(unit_base["UnitTime_in_s"], "s")
+
+        if "time" in unit_base:
+            time_unit = unit_base["time"]
+        elif "UnitTime_in_s" in unit_base:
+            time_unit = (unit_base["UnitTime_in_s"], "s")
+        else:
+            time_unit = (1., "s")        
+        self.time_unit = self.quan(time_unit[0], time_unit[1])
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r f66a7bff938778f6b68b964a36928df5a3dd009f -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -17,6 +17,7 @@
     config.add_subpackage("fits")
     config.add_subpackage("flash")
     config.add_subpackage("gadget")
+    config.add_subpackage("gadget_fof")
     config.add_subpackage("gdf")
     config.add_subpackage("halo_catalog")
     config.add_subpackage("http_stream")
@@ -34,11 +35,12 @@
     config.add_subpackage("athena/tests")
     config.add_subpackage("boxlib/tests")
     config.add_subpackage("chombo/tests")
+    config.add_subpackage("eagle/tests")
     config.add_subpackage("enzo/tests")
-    config.add_subpackage("eagle/tests")
     config.add_subpackage("fits/tests")
     config.add_subpackage("flash/tests")
     config.add_subpackage("gadget/tests")
+    config.add_subpackage("gadget_fof/tests")
     config.add_subpackage("moab/tests")
     config.add_subpackage("owls/tests")
     config.add_subpackage("owls_subfind/tests")


https://bitbucket.org/yt_analysis/yt/commits/fea5d27324ad/
Changeset:   fea5d27324ad
Branch:      yt
User:        brittonsmith
Date:        2015-04-30 15:51:24+00:00
Summary:     Fixing some units and enabling field detection.
Affected #:  3 files

diff -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 -r fea5d27324ad3891dc37a4b0c6afde553d035627 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -200,7 +200,10 @@
         elif "UnitVelocity_in_cm_per_s" in unit_base:
             velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
         else:
-            velocity_unit = (1e5, "cm/s")
+            if self.cosmological_simulation == 0:
+                velocity_unit = (1e5, "cm/s")
+            else:
+                velocity_unit = (1e5, "cmcm/s")
         velocity_unit = _fix_unit_ordering(velocity_unit)
         self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
 

diff -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 -r fea5d27324ad3891dc37a4b0c6afde553d035627 yt/frontends/gadget_fof/fields.py
--- a/yt/frontends/gadget_fof/fields.py
+++ b/yt/frontends/gadget_fof/fields.py
@@ -21,20 +21,20 @@
     YTArray
 
 m_units = "code_mass"
-mdot_units = "code_mass / code_time"
-p_units = "Mpccm/h"
-v_units = "1e5 * cmcm / s"
+p_units = "code_length"
+v_units = "code_velocity"
 
 class GadgetFOFFieldInfo(FieldInfoContainer):
     known_other_fields = (
     )
 
     known_particle_fields = (
-        ("Pos_0", (p_units, ["particle_position_x"], None)),
-        ("Pos_1", (p_units, ["particle_position_y"], None)),
-        ("Pos_2", (p_units, ["particle_position_z"], None)),
-        ("Vel_0", (v_units, ["particle_velocity_x"], None)),
-        ("Vel_1", (v_units, ["particle_velocity_y"], None)),
-        ("Vel_2", (v_units, ["particle_velocity_z"], None)),
-        ("Mass", (m_units, ["particle_mass"], None)),
+        ("GroupPos_0", (p_units, ["particle_position_x"], None)),
+        ("GroupPos_1", (p_units, ["particle_position_y"], None)),
+        ("GroupPos_2", (p_units, ["particle_position_z"], None)),
+        ("GroupVel_0", (v_units, ["particle_velocity_x"], None)),
+        ("GroupVel_1", (v_units, ["particle_velocity_y"], None)),
+        ("GroupVel_2", (v_units, ["particle_velocity_z"], None)),
+        ("GroupMass",  (m_units, ["particle_mass"], None)),
+        ("GroupLen",   ("",      ["particle_number"], None)),
 )

diff -r e6a1ea59160c3f2bd688baa7b47ffcb5e019cad3 -r fea5d27324ad3891dc37a4b0c6afde553d035627 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -163,9 +163,9 @@
             for ptype in self.ds.particle_types_raw:
                 if data_file.total_particles[ptype] == 0: continue
                 fields.append((ptype, "particle_identifier"))
-                # my_fields, my_offset_fields = \
-                #   subfind_field_list(f[ptype], ptype, data_file.total_particles)
-                # fields.extend(my_fields)
+                my_fields, my_offset_fields = \
+                  subfind_field_list(f[ptype], ptype, data_file.total_particles)
+                fields.extend(my_fields)
                 # self.offset_fields = self.offset_fields.union(set(my_offset_fields))
         return fields, {}
 
@@ -173,10 +173,7 @@
     fields = []
     offset_fields = []
     for field in fh.keys():
-        if "PartType" in field:
-            # These are halo member particles
-            continue
-        elif isinstance(fh[field], h5py.Group):
+        if isinstance(fh[field], h5py.Group):
             my_fields, my_offset_fields = \
               subfind_field_list(fh[field], ptype, pcount)
             fields.extend(my_fields)
@@ -190,19 +187,19 @@
                         fields.append((ptype, "%s_%d" % (fname, i)))
                 else:
                     fields.append((ptype, fname))
-            elif ptype == "SUBFIND" and \
-              not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
-                # These are actually FOF fields, but they were written after 
-                # a load balancing step moved halos around and thus they do not
-                # correspond to the halos stored in the FOF group.
-                my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
-                fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
-                if my_div > 1:
-                    for i in range(my_div):
-                        fields.append(("FOF", "%s_%d" % (fname, i)))
-                else:
-                    fields.append(("FOF", fname))
-                offset_fields.append(fname)
+            # elif ptype == "SUBFIND" and \
+            #   not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
+            #     # These are actually FOF fields, but they were written after 
+            #     # a load balancing step moved halos around and thus they do not
+            #     # correspond to the halos stored in the FOF group.
+            #     my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
+            #     fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+            #     if my_div > 1:
+            #         for i in range(my_div):
+            #             fields.append(("FOF", "%s_%d" % (fname, i)))
+            #     else:
+            #         fields.append(("FOF", fname))
+            #     offset_fields.append(fname)
             else:
                 mylog.warn("Cannot add field (%s, %s) with size %d." % \
                            (ptype, fh[field].name, fh[field].size))


https://bitbucket.org/yt_analysis/yt/commits/4ca7a59b1c8a/
Changeset:   4ca7a59b1c8a
Branch:      yt
User:        brittonsmith
Date:        2015-04-30 15:55:00+00:00
Summary:     Leaving note in case we need to implement offset fields.  That was hard enough the first time.
Affected #:  1 file

diff -r fea5d27324ad3891dc37a4b0c6afde553d035627 -r 4ca7a59b1c8ae2ff07c67dba3a81ec776d0b4b34 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -166,7 +166,7 @@
                 my_fields, my_offset_fields = \
                   subfind_field_list(f[ptype], ptype, data_file.total_particles)
                 fields.extend(my_fields)
-                # self.offset_fields = self.offset_fields.union(set(my_offset_fields))
+                self.offset_fields = self.offset_fields.union(set(my_offset_fields))
         return fields, {}
 
 def subfind_field_list(fh, ptype, pcount):
@@ -187,6 +187,8 @@
                         fields.append((ptype, "%s_%d" % (fname, i)))
                 else:
                     fields.append((ptype, fname))
+            ### Leave this block of code in case we need to do this.
+            ### This will have to wait until I get a dataset with subhalos.
             # elif ptype == "SUBFIND" and \
             #   not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
             #     # These are actually FOF fields, but they were written after 


https://bitbucket.org/yt_analysis/yt/commits/6bd634cfa633/
Changeset:   6bd634cfa633
Branch:      yt
User:        brittonsmith
Date:        2015-07-10 12:30:35+00:00
Summary:     Enabling subhalo fields.
Affected #:  1 file

diff -r 4ca7a59b1c8ae2ff07c67dba3a81ec776d0b4b34 -r 6bd634cfa633a1e93df5914e368c7a00e7966647 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -55,7 +55,7 @@
 
     def _read_offset_particle_field(self, field, data_file, fh):
         field_data = np.empty(data_file.total_particles["Group"], dtype="float64")
-        fofindex = np.arange(data_file.total_particles["Group"]) + data_file.index_start["FOF"]
+        fofindex = np.arange(data_file.total_particles["Group"]) + data_file.index_start["Group"]
         for offset_file in data_file.offset_files:
             if fh.filename == offset_file.filename:
                 ofh = fh
@@ -187,21 +187,19 @@
                         fields.append((ptype, "%s_%d" % (fname, i)))
                 else:
                     fields.append((ptype, fname))
-            ### Leave this block of code in case we need to do this.
-            ### This will have to wait until I get a dataset with subhalos.
-            # elif ptype == "SUBFIND" and \
-            #   not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
-            #     # These are actually FOF fields, but they were written after 
-            #     # a load balancing step moved halos around and thus they do not
-            #     # correspond to the halos stored in the FOF group.
-            #     my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
-            #     fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
-            #     if my_div > 1:
-            #         for i in range(my_div):
-            #             fields.append(("FOF", "%s_%d" % (fname, i)))
-            #     else:
-            #         fields.append(("FOF", fname))
-            #     offset_fields.append(fname)
+            elif ptype == "Subfind" and \
+              not fh[field].size % fh["/Subfind"].attrs["Number_of_groups"]:
+                # These are actually Group fields, but they were written after 
+                # a load balancing step moved halos around and thus they do not
+                # correspond to the halos stored in the Group group.
+                my_div = fh[field].size / fh["/Subfind"].attrs["Number_of_groups"]
+                fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
+                if my_div > 1:
+                    for i in range(my_div):
+                        fields.append(("Group", "%s_%d" % (fname, i)))
+                else:
+                    fields.append(("Group", fname))
+                offset_fields.append(fname)
             else:
                 mylog.warn("Cannot add field (%s, %s) with size %d." % \
                            (ptype, fh[field].name, fh[field].size))


https://bitbucket.org/yt_analysis/yt/commits/8c733329ae76/
Changeset:   8c733329ae76
Branch:      yt
User:        brittonsmith
Date:        2015-07-10 12:36:34+00:00
Summary:     Adding subhalo fields.
Affected #:  1 file

diff -r 6bd634cfa633a1e93df5914e368c7a00e7966647 -r 8c733329ae760445afefde4b7c81d96d8fd1086e yt/frontends/gadget_fof/fields.py
--- a/yt/frontends/gadget_fof/fields.py
+++ b/yt/frontends/gadget_fof/fields.py
@@ -29,12 +29,20 @@
     )
 
     known_particle_fields = (
-        ("GroupPos_0", (p_units, ["particle_position_x"], None)),
-        ("GroupPos_1", (p_units, ["particle_position_y"], None)),
-        ("GroupPos_2", (p_units, ["particle_position_z"], None)),
-        ("GroupVel_0", (v_units, ["particle_velocity_x"], None)),
-        ("GroupVel_1", (v_units, ["particle_velocity_y"], None)),
-        ("GroupVel_2", (v_units, ["particle_velocity_z"], None)),
-        ("GroupMass",  (m_units, ["particle_mass"], None)),
-        ("GroupLen",   ("",      ["particle_number"], None)),
+        ("GroupPos_0", (p_units, ["Group", "particle_position_x"], None)),
+        ("GroupPos_1", (p_units, ["Group", "particle_position_y"], None)),
+        ("GroupPos_2", (p_units, ["Group", "particle_position_z"], None)),
+        ("GroupVel_0", (v_units, ["Group", "particle_velocity_x"], None)),
+        ("GroupVel_1", (v_units, ["Group", "particle_velocity_y"], None)),
+        ("GroupVel_2", (v_units, ["Group", "particle_velocity_z"], None)),
+        ("GroupMass",  (m_units, ["Group", "particle_mass"], None)),
+        ("GroupLen",   ("",      ["Group", "particle_number"], None)),
+        ("SubhaloPos_0", (p_units, ["Subhalo", "particle_position_x"], None)),
+        ("SubhaloPos_1", (p_units, ["Subhalo", "particle_position_y"], None)),
+        ("SubhaloPos_2", (p_units, ["Subhalo", "particle_position_z"], None)),
+        ("SubhaloVel_0", (v_units, ["Subhalo", "particle_velocity_x"], None)),
+        ("SubhaloVel_1", (v_units, ["Subhalo", "particle_velocity_y"], None)),
+        ("SubhaloVel_2", (v_units, ["Subhalo", "particle_velocity_z"], None)),
+        ("SubhaloMass",  (m_units, ["Subhalo", "particle_mass"], None)),
+        ("SubhaloLen",   ("",      ["Subhalo", "particle_number"], None)),
 )


https://bitbucket.org/yt_analysis/yt/commits/006e8a47b7ba/
Changeset:   006e8a47b7ba
Branch:      yt
User:        brittonsmith
Date:        2015-07-10 14:20:47+00:00
Summary:     Updating tests with real data.
Affected #:  1 file

diff -r 8c733329ae760445afefde4b7c81d96d8fd1086e -r 006e8a47b7bafad3c884baf48a8b7e40dfe911aa yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-GadgetFOF frontend tests using owls_fof_halos datasets
+GadgetFOF frontend tests using gadget_fof datasets
 
 
 
@@ -19,32 +19,38 @@
 from yt.utilities.answer_testing.framework import \
     FieldValuesTest, \
     requires_ds, \
+    requires_file, \
     data_dir_load
 from yt.frontends.gadget_fof.api import GadgetFOFDataset
 
-_fields = ("particle_position_x", "particle_position_y",
-           "particle_position_z", "particle_mass")
+p_types  = ("Group", "Subhalo")
+p_fields = ("particle_position_x", "particle_position_y",
+            "particle_position_z", "particle_velocity_x",
+            "particle_velocity_y", "particle_velocity_z",
+            "particle_mass", "particle_identifier")
+_fields = tuple([(p_type, p_field) for p_type in p_types
+                                   for p_field in p_fields])
 
 # a dataset with empty files
-g1 = "" # TBD
-g8 = "" # TBD
+g5 = "gadget_fof/groups_005/fof_subhalo_tab_005.0.hdf5"
+g42 = "gadget_fof/groups_042/fof_subhalo_tab_042.0.hdf5"
 
 
-@requires_ds(g8)
-def test_fields_g8():
-    ds = data_dir_load(g8)
-    yield assert_equal, str(ds), os.path.basename(g8)
+@requires_ds(g5)
+def test_fields_g5():
+    ds = data_dir_load(g5)
+    yield assert_equal, str(ds), os.path.basename(g5)
     for field in _fields:
-        yield FieldValuesTest(g8, field, particle_type=True)
+        yield FieldValuesTest(g5, field, particle_type=True)
 
 
-@requires_ds(g1)
-def test_fields_g1():
-    ds = data_dir_load(g1)
-    yield assert_equal, str(ds), os.path.basename(g1)
+@requires_ds(g42)
+def test_fields_g42():
+    ds = data_dir_load(g42)
+    yield assert_equal, str(ds), os.path.basename(g42)
     for field in _fields:
-        yield FieldValuesTest(g1, field, particle_type=True)
+        yield FieldValuesTest(g42, field, particle_type=True)
 
-@requires_file(g1)
+@requires_file(g42)
 def test_GadgetFOFDataset():
-    assert isinstance(data_dir_load(g1), GadgetFOFDataset)
+    assert isinstance(data_dir_load(g42), GadgetFOFDataset)


https://bitbucket.org/yt_analysis/yt/commits/9051b33b1980/
Changeset:   9051b33b1980
Branch:      yt
User:        brittonsmith
Date:        2015-07-10 14:29:43+00:00
Summary:     Updating test data name.
Affected #:  1 file

diff -r 006e8a47b7bafad3c884baf48a8b7e40dfe911aa -r 9051b33b19804907950cfb614c66c6fbe7ca9714 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -32,8 +32,8 @@
                                    for p_field in p_fields])
 
 # a dataset with empty files
-g5 = "gadget_fof/groups_005/fof_subhalo_tab_005.0.hdf5"
-g42 = "gadget_fof/groups_042/fof_subhalo_tab_042.0.hdf5"
+g5 = "gadget_fof_halos/groups_005/fof_subhalo_tab_005.0.hdf5"
+g42 = "gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5"
 
 
 @requires_ds(g5)


https://bitbucket.org/yt_analysis/yt/commits/19643c0f4960/
Changeset:   19643c0f4960
Branch:      yt
User:        brittonsmith
Date:        2015-07-10 15:13:12+00:00
Summary:     Adding docs on loading halo catalogs.
Affected #:  1 file

diff -r 9051b33b19804907950cfb614c66c6fbe7ca9714 -r 19643c0f4960d876d01349fb0f46050cff25a113 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1079,6 +1079,76 @@
 
 .. _loading-pyne-data:
 
+Halo Catalog Data
+-----------------
+
+yt has support for reading halo catalogs produced by Rockstar and the inline 
+FOF/SUBFIND halo finders of Gadget and OWLS.  The halo catalogs are treated as 
+particle datasets where each particle represents a single halo.  At this time, 
+yt does not have the ability to load the member particles for a given halo.  
+However, once loaded, further halo analysis can be performed using 
+:ref:`halo_catalog`.
+
+When a halo catalog is written to multiple files, only the path to one of 
+them needs to be given.
+
+Gadget FOF/SUBFIND
+^^^^^^^^^^^^^^^^^^
+
+The two field types for GadgetFOF data are "Group" (FOF) and "Subhalo" (SUBFIND).
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5")
+   ad = ds.all_data()
+   # The halo mass
+   print ad["Group", "particle_mass"]
+   print ad["Subhalo", "particle_mass"]
+   # Halo ID
+   print ad["Group", "particle_identifier"]
+   print ad["Subhalo", "particle_identifier"]
+   # positions
+   print ad["Group", "particle_position_x"]
+   # velocities
+   print ad["Group", "particle_velocity_x"]
+
+Multidimensional fields can be accessed through the field name followed by an 
+underscore and the index.
+
+.. code-block:: python
+
+   # x component of the spin
+   print ad["Subhalo", "SubhaloSpin_0"]
+
+OWLS FOF/SUBFIND
+^^^^^^^^^^^^^^^^
+
+OWLS halo catalogs have a very similar structure to regular Gadget halo catalogs.  
+The two field types are "FOF" and "SUBFIND".
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("owls_fof_halos/groups_008/group_008.0.hdf5")
+   ad = ds.all_data()
+   # The halo mass
+   print ad["FOF", "particle_mass"]
+
+Rockstar
+^^^^^^^^
+
+Rockstar halo catalogs are loaded by providing the path to one of the .bin files.
+The single field type available is "halos".
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("rockstar_halos/halos_0.0.bin")
+   ad = ds.all_data()
+   # The halo mass
+   print ad["halos", "particle_mass"]
+
 PyNE Data
 ---------
 


https://bitbucket.org/yt_analysis/yt/commits/ff1a3c631536/
Changeset:   ff1a3c631536
Branch:      yt
User:        brittonsmith
Date:        2015-07-11 16:17:44+00:00
Summary:     Adding code of conduct.
Affected #:  1 file

diff -r 19643c0f4960d876d01349fb0f46050cff25a113 -r ff1a3c631536434c9d177fa68bfb473bc37d9465 doc/source/developing/intro.rst
--- a/doc/source/developing/intro.rst
+++ b/doc/source/developing/intro.rst
@@ -142,3 +142,77 @@
 federated database for simulation outputs, and so on and so forth.
 
 yt is an ambitious project.  Let's be ambitious together.
+
+yt Community Code of Conduct
+----------------------------
+
+The community of participants in open source 
+Astronomy projects is made up of members from around the
+globe with a diverse set of skills, personalities, and
+experiences. It is through these differences that our
+community experiences success and continued growth. We
+expect everyone in our community to follow these guidelines
+when interacting with others both inside and outside of our
+community. Our goal is to keep ours a positive, inclusive,
+successful, and growing community.
+
+As members of the community,
+
+- We pledge to treat all people with respect and
+  provide a harassment- and bullying-free environment,
+  regardless of sex, sexual orientation and/or gender
+  identity, disability, physical appearance, body size,
+  race, nationality, ethnicity, and religion. In
+  particular, sexual language and imagery, sexist,
+  racist, or otherwise exclusionary jokes are not
+  appropriate.
+
+- We pledge to respect the work of others by
+  recognizing acknowledgment/citation requests of
+  original authors. As authors, we pledge to be explicit
+  about how we want our own work to be cited or
+  acknowledged.
+
+- We pledge to welcome those interested in joining the
+  community, and realize that including people with a
+  variety of opinions and backgrounds will only serve to
+  enrich our community. In particular, discussions
+  relating to pros/cons of various technologies,
+  programming languages, and so on are welcome, but
+  these should be done with respect, taking proactive
+  measures to ensure that all participants are heard and
+  feel confident that they can freely express their
+  opinions.
+
+- We pledge to welcome questions and answer them
+  respectfully, paying particular attention to those new
+  to the community. We pledge to provide respectful
+  criticisms and feedback in forums, especially in
+  discussion threads resulting from code
+  contributions.
+
+- We pledge to be conscientious of the perceptions of
+  the wider community and to respond to criticism
+  respectfully. We will strive to model behaviors that
+  encourage productive debate and disagreement, both
+  within our community and where we are criticized. We
+  will treat those outside our community with the same
+  respect as people within our community.
+
+- We pledge to help the entire community follow the
+  code of conduct, and to not remain silent when we see
+  violations of the code of conduct. We will take action
+  when members of our community violate this code such as
+  contacting confidential at astropy.org (all emails sent to
+  this address will be treated with the strictest
+  confidence) or talking privately with the person.
+
+This code of conduct applies to all
+community situations online and offline, including mailing
+lists, forums, social media, conferences, meetings,
+associated social events, and one-to-one interactions.
+
+The yt Community Code of Conduct was adapted from the 
+`Astropy Community Code of Conduct 
+<http://www.astropy.org/about.html#codeofconduct>`_,
+which was partially inspired by the PSF code of conduct.


https://bitbucket.org/yt_analysis/yt/commits/04499d34ddbd/
Changeset:   04499d34ddbd
Branch:      yt
User:        brittonsmith
Date:        2015-07-16 16:54:45+00:00
Summary:     Changing email address and astronomy to scientific.
Affected #:  1 file

diff -r ff1a3c631536434c9d177fa68bfb473bc37d9465 -r 04499d34ddbd8f91389560984b3da1bc75ac3e51 doc/source/developing/intro.rst
--- a/doc/source/developing/intro.rst
+++ b/doc/source/developing/intro.rst
@@ -147,7 +147,7 @@
 ----------------------------
 
 The community of participants in open source 
-Astronomy projects is made up of members from around the
+Scientific projects is made up of members from around the
 globe with a diverse set of skills, personalities, and
 experiences. It is through these differences that our
 community experiences success and continued growth. We
@@ -203,7 +203,7 @@
   code of conduct, and to not remain silent when we see
   violations of the code of conduct. We will take action
   when members of our community violate this code such as
-  contacting confidential at astropy.org (all emails sent to
+  contacting confidential at yt-project.org (all emails sent to
   this address will be treated with the strictest
   confidence) or talking privately with the person.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list