[yt-svn] commit/yt: 19 new changesets

commits-noreply at bitbucket.org
Wed Jun 3 09:39:51 PDT 2015


19 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/7534439b02fd/
Changeset:   7534439b02fd
Branch:      yt
User:        Ben Thompson
Date:        2015-05-12 19:47:57+00:00
Summary:     updates
Affected #:  2 files

diff -r 7fe9f94e6991070c0888113d7995d543538836b7 -r 7534439b02fd5bbbe9653408ff6af9c3356b8700 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -76,6 +76,16 @@
         return rv
     return _AllFields
 
+def get_angular_momentum_componants(ptype, data, spos, svel):
+    if data.has_field_parameter("normal"):
+       normal = data.get_field_parameter("normal")
+    else:
+       normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
+    vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+    return pos, vel, normal, bv
+
 def _field_concat_slice(fname, axi):
     def _AllFields(field, data):
         v = []
@@ -118,10 +128,8 @@
              units = "g")
 
     def particle_density(field, data):
-        pos = data[ptype, coord_name]
-        mass = data[ptype, mass_name]
-        pos.convert_to_units("code_length")
-        mass.convert_to_units("code_mass")
+        pos = data[ptype, coord_name].convert_to_units("code_length")
+        mass = data[ptype, mass_name].convert_to_units("code_mass")
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
         d = data.ds.arr(d, "code_mass")
         d /= data["index", "cell_volume"]
@@ -249,12 +257,11 @@
                              svel = "particle_velocity_%s"):
     # This function will set things up based on the scalar fields and standard
     # yt field names.
+    # data.get_field_parameter("bulk_velocity") defaults to YTArray([0,0,0] cm/s)
 
     def _particle_velocity_magnitude(field, data):
         """ M{|v|} """
         bulk_velocity = data.get_field_parameter("bulk_velocity")
-        if bulk_velocity is None:
-            bulk_velocity = np.zeros(3)
         return np.sqrt((data[ptype, svel % 'x'] - bulk_velocity[0])**2
                      + (data[ptype, svel % 'y'] - bulk_velocity[1])**2
                      + (data[ptype, svel % 'z'] - bulk_velocity[2])**2 )
@@ -270,19 +277,11 @@
         Calculate the angular momentum of a particle velocity.  Returns a vector for each
         particle.
         """
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        xv = data[ptype, svel % 'x'] - bv[0]
-        yv = data[ptype, svel % 'y'] - bv[1]
-        zv = data[ptype, svel % 'z'] - bv[2]
         center = data.get_field_parameter('center')
-        coords = self.ds.arr([data[ptype, spos % d] for d in 'xyz'],
-                             dtype=np.float64, units='cm')
-        new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-        r_vec = coords - np.reshape(center,new_shape)
-        v_vec = self.ds.arr([xv,yv,zv], dtype=np.float64, units='cm/s')
-        return np.cross(r_vec, v_vec, axis=0).T
+        pos, vel, normal, bv = get_angular_momentum_componants(ptype, data, spos, svel)
+        L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)
+        return np.cross(r_vec, v_vec)
+
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
               function=_particle_specific_angular_momentum,
@@ -291,15 +290,23 @@
               validators=[ValidateParameter("center")])
 
     def _particle_specific_angular_momentum_x(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
+        """
+        Computes the specific angular momentum of a field in the x direction
+
+        If the dataset has the field parameter "normal", then the 
+        specific angular momentum is calculated in the x axis relative
+        to the normal vector.
+
+        Note that the orientation of the x and y axes is arbitrary if a normal
+        vector is defined.
+        """
         center = data.get_field_parameter('center')
-        y = data[ptype, spos % "y"] - center[1]
-        z = data[ptype, spos % "z"] - center[2]
-        yv = data[ptype, svel % "y"] - bv[1]
-        zv = data[ptype, svel % "z"] - bv[2]
-        return yv*z - zv*y
+        pos, vel, normal, bv = get_angular_momentum_componants(ptype, data, spos, svel)
+        L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
+        pos = pos.T
+        vel = vel.T
+        y, z, yv, zv = pos[1], pos[2], vel[1], vel[2]
+        return y*zv - z*yv
 
     registry.add_field((ptype, "particle_specific_angular_momentum_x"),
               function=_particle_specific_angular_momentum_x,
@@ -308,15 +315,24 @@
               validators=[ValidateParameter("center")])
 
     def _particle_specific_angular_momentum_y(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
+        """
+        Computes the specific angular momentum of a field in the y direction
+
+        If the dataset has the field parameter "normal", then the 
+        specific angular momentum is calculated in the y axis relative
+        to the normal vector.
+
+        Note that the orientation of the x and y axes is arbitrary if a normal
+        vector is defined.
+        """
         center = data.get_field_parameter('center')
-        x = data[ptype, spos % "x"] - center[0]
-        z = data[ptype, spos % "z"] - center[2]
-        xv = data[ptype, svel % "x"] - bv[0]
-        zv = data[ptype, svel % "z"] - bv[2]
-        return -(xv*z - zv*x)
+        pos, vel, normal, bv = get_angular_momentum_componants(ptype, data, spos, svel)
+        L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
+        pos = pos.T
+        vel = vel.T
+        x, z, xv, zv = pos[0], pos[2], vel[0], vel[2]
+        return -(x*zv - z*xv)
+
 
     registry.add_field((ptype, "particle_specific_angular_momentum_y"),
               function=_particle_specific_angular_momentum_y,
@@ -325,15 +341,24 @@
               validators=[ValidateParameter("center")])
 
     def _particle_specific_angular_momentum_z(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
+        """
+        Computes the specific angular momentum of a field in the z direction
+
+        If the dataset has the field parameter "normal", then the 
+        specific angular momentum is calculated in the z axis relative
+        to the normal vector.
+
+        Note that the orientation of the x and y axes is arbitrary if a normal
+        vector is defined.
+        """
         center = data.get_field_parameter('center')
-        x = data[ptype, spos % "x"] - center[0]
-        y = data[ptype, spos % "y"] - center[1]
-        xv = data[ptype, svel % "x"] - bv[0]
-        yv = data[ptype, svel % "y"] - bv[1]
-        return xv*y - yv*x
+        pos, vel, normal, bv = get_angular_momentum_componants(ptype, data, spos, svel)
+        L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
+        pos = pos.T
+        vel = vel.T
+        x, y, xv, yv = pos[0], pos[1], vel[0], vel[1]
+        return x*yv - y*xv
+
 
     registry.add_field((ptype, "particle_specific_angular_momentum_z"),
               function=_particle_specific_angular_momentum_z,
@@ -347,6 +372,7 @@
     def _particle_angular_momentum_x(field, data):
         return data[ptype, "particle_mass"] * \
                data[ptype, "particle_specific_angular_momentum_x"]
+
     registry.add_field((ptype, "particle_angular_momentum_x"),
              function=_particle_angular_momentum_x,
              units="g*cm**2/s", particle_type=True,
@@ -355,6 +381,7 @@
     def _particle_angular_momentum_y(field, data):
         return data[ptype, "particle_mass"] * \
                data[ptype, "particle_specific_angular_momentum_y"]
+
     registry.add_field((ptype, "particle_angular_momentum_y"),
              function=_particle_angular_momentum_y,
              units="g*cm**2/s", particle_type=True,
@@ -363,14 +390,17 @@
     def _particle_angular_momentum_z(field, data):
         return data[ptype, "particle_mass"] * \
                data[ptype, "particle_specific_angular_momentum_z"]
+
     registry.add_field((ptype, "particle_angular_momentum_z"),
              function=_particle_angular_momentum_z,
              units="g*cm**2/s", particle_type=True,
              validators=[ValidateParameter('center')])
 
     def _particle_angular_momentum(field, data):
-        return (data[ptype, "particle_mass"] *
-                data[ptype, "particle_specific_angular_momentum"].T).T
+        return data[ptype, "particle_mass"] \
+            * data[ptype, "particle_specific_angular_momentum"]
+
+
     registry.add_field((ptype, "particle_angular_momentum"),
               function=_particle_angular_momentum,
               particle_type=True,
@@ -405,9 +435,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        pos = pos.T
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos = modify_reference_frame(center, normal, P=pos)
         return pos
 
@@ -429,9 +457,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        pos = pos.T
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos, = modify_reference_frame(center, normal, P=pos)
         pos = pos.T
         return pos[0]
@@ -454,9 +480,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        pos = pos.T
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos = modify_reference_frame(center, normal, P=pos)
         pos = pos.T
         return pos[1]
@@ -479,10 +503,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        pos = pos.T
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos = modify_reference_frame(center, normal, P=pos)
         pos = pos.T
         return pos[2]
@@ -504,10 +525,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        vel = vel - np.reshape(bv, (3, 1))
-        vel = vel.T
+        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         return vel
 
@@ -529,10 +547,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        vel = vel - np.reshape(bv, (3, 1))
-        vel = vel.T
+        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         vel = vel.T
         return vel[0]
@@ -555,10 +570,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter('bulk_velocity')
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        vel = vel - np.reshape(bv, (3, 1))
-        vel = vel.T
+        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         vel = vel.T
         return vel[1]
@@ -581,10 +593,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        bv = vel - np.reshape(bv, (3, 1))
-        vel = vel.T
+        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         vel = vel.T
         return vel[2]
@@ -621,8 +630,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter("center")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_sph_theta(pos, normal), "")
 
@@ -651,8 +659,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter("center")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_sph_phi(pos, normal), "")
 
@@ -683,10 +690,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -728,10 +733,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -858,10 +861,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_cyl_theta(pos, center)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
@@ -884,10 +885,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_cyl_theta(pos, center)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
@@ -920,10 +919,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         cylz = get_cyl_z_component(vel, normal)
@@ -1044,3 +1041,4 @@
                        units = "g/cm**3")
     return [field_name]
 
+
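
For context, the "normal" field parameter introduced above lets the angular
momentum fields be computed about an arbitrary axis instead of the simulation
z axis. A minimal usage sketch (the dataset name, sphere size, and normal
vector are hypothetical; the units on the normal mirror the commit's default):

    import yt

    ds = yt.load("galaxy0030/galaxy0030")   # hypothetical dataset
    sp = ds.sphere("max", (10.0, "kpc"))

    # Align the calculation with a chosen disk axis; without this
    # parameter the fields fall back to the simulation z axis.
    sp.set_field_parameter("normal", ds.arr([0.1, 0.1, 0.99], "code_length"))

    Lz = sp["all", "particle_specific_angular_momentum_z"]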

diff -r 7fe9f94e6991070c0888113d7995d543538836b7 -r 7534439b02fd5bbbe9653408ff6af9c3356b8700 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -301,17 +301,36 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
+
+    # First translate the positions to center of mass reference frame.
+    if P is not None:
+        P = P - CoM
+
     if (L == np.array([0, 0, 1.])).all():
-        # Whew! Nothing to do!
+        # Whew! No rotation to do!
         if V is None:
             return L, P
-        if P is None:
+        elif P is None:
             return L, V
         else:
             return L, P, V
-    # First translate the positions to center of mass reference frame.
-    if P is not None:
-        P = P - CoM
+
+    if (L == np.array([0, 0, -1.])).all():
+        # Just a simple flip of axis to do!
+        if P is not None:
+            P = -P
+        if V is not None:
+            V = -V
+
+        if V is None:
+            return L, P
+        elif P is None:
+            return L, V
+        else:
+            return L, P, V
+
+    # Normal vector is not aligned with simulation Z axis
+    # Therefore we are going to have to apply a rotation
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
     LL[2] = 0.
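
The two fast paths added to modify_reference_frame can be sanity-checked
outside yt; a standalone numpy sketch of what they do to positions (values
are arbitrary):

    import numpy as np

    CoM = np.array([0.5, 0.5, 0.5])
    P = np.array([[0.6, 0.5, 0.7],
                  [0.4, 0.5, 0.3]])

    # L aligned with +z: only the translation to the CoM frame applies.
    P_aligned = P - CoM

    # L aligned with -z: translate, then flip the sign of every component.
    P_flipped = -(P - CoM)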


https://bitbucket.org/yt_analysis/yt/commits/aabb29692aea/
Changeset:   aabb29692aea
Branch:      yt
User:        Ben Thompson
Date:        2015-05-12 20:29:45+00:00
Summary:     specific angular momentum vector now returned in the right units
Affected #:  1 file

diff -r 7534439b02fd5bbbe9653408ff6af9c3356b8700 -r aabb29692aea377f5359f6a5a2cde642f33b4aa7 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -76,16 +76,6 @@
         return rv
     return _AllFields
 
-def get_angular_momentum_componants(ptype, data, spos, svel):
-    if data.has_field_parameter("normal"):
-       normal = data.get_field_parameter("normal")
-    else:
-       normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
-    vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
-    return pos, vel, normal, bv
-
 def _field_concat_slice(fname, axi):
     def _AllFields(field, data):
         v = []
@@ -252,6 +242,16 @@
                        units = "cm / s",
                        particle_type=True)
 
+def get_angular_momentum_componants(ptype, data, spos, svel):
+    if data.has_field_parameter("normal"):
+       normal = data.get_field_parameter("normal")
+    else:
+       normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
+    vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+    return pos, vel, normal, bv
+
 def standard_particle_fields(registry, ptype,
                              spos = "particle_position_%s",
                              svel = "particle_velocity_%s"):
@@ -278,9 +278,15 @@
         particle.
         """
         center = data.get_field_parameter('center')
-        pos, vel, normal, bv = get_angular_momentum_componants(ptype, data, spos, svel)
+        if data.has_field_parameter("normal"):
+           normal = data.get_field_parameter("normal")
+        else:
+           normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
+        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)
-        return np.cross(r_vec, v_vec)
+        return YTArray(np.cross(r_vec.in_units("cm"), v_vec.in_units("cm/s")),"cm**2/s")
 
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
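
The explicit in_units/YTArray wrapping above works around the fact that, at
the time, np.cross did not combine the units of its YTArray inputs, so the
units on its result could not be trusted. A standalone illustration of the
manual fix (the same bookkeeping a later commit factors out into ucross):

    import numpy as np
    from yt.units.yt_array import YTArray

    r = YTArray([[1.0, 0.0, 0.0]], "cm")
    v = YTArray([[0.0, 1.0, 0.0]], "cm/s")

    raw = np.cross(r, v)                   # units are not propagated
    L = YTArray(raw, r.units * v.units)    # re-attach cm**2/s by hand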


https://bitbucket.org/yt_analysis/yt/commits/b54b4413b117/
Changeset:   b54b4413b117
Branch:      yt
User:        Ben Thompson
Date:        2015-05-12 20:40:42+00:00
Summary:     replaced all YTArray() calls with data.ds.arr()
Affected #:  1 file

diff -r aabb29692aea377f5359f6a5a2cde642f33b4aa7 -r b54b4413b1176fe400ac5a39090d6ae2a434b759 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -242,14 +242,14 @@
                        units = "cm / s",
                        particle_type=True)
 
-def get_angular_momentum_componants(ptype, data, spos, svel):
+def get_angular_momentum_components(ptype, data, spos, svel):
     if data.has_field_parameter("normal"):
        normal = data.get_field_parameter("normal")
     else:
        normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
     bv = data.get_field_parameter("bulk_velocity")
-    pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
-    vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+    pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
+    vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
     return pos, vel, normal, bv
 
 def standard_particle_fields(registry, ptype,
@@ -278,15 +278,9 @@
         particle.
         """
         center = data.get_field_parameter('center')
-        if data.has_field_parameter("normal"):
-           normal = data.get_field_parameter("normal")
-        else:
-           normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
-        bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
-        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+        pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
         L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)
-        return YTArray(np.cross(r_vec.in_units("cm"), v_vec.in_units("cm/s")),"cm**2/s")
+        return data.ds.arr(np.cross(r_vec.in_units("cm"), v_vec.in_units("cm/s")),"cm**2/s")
 
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
@@ -307,7 +301,7 @@
         vector is defined.
         """
         center = data.get_field_parameter('center')
-        pos, vel, normal, bv = get_angular_momentum_componants(ptype, data, spos, svel)
+        pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
         L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
         pos = pos.T
         vel = vel.T
@@ -332,7 +326,7 @@
         vector is defined.
         """
         center = data.get_field_parameter('center')
-        pos, vel, normal, bv = get_angular_momentum_componants(ptype, data, spos, svel)
+        pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
         L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
         pos = pos.T
         vel = vel.T
@@ -358,7 +352,7 @@
         vector is defined.
         """
         center = data.get_field_parameter('center')
-        pos, vel, normal, bv = get_angular_momentum_componants(ptype, data, spos, svel)
+        pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
         L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
         pos = pos.T
         vel = vel.T
@@ -441,7 +435,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos = modify_reference_frame(center, normal, P=pos)
         return pos
 
@@ -463,7 +457,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos, = modify_reference_frame(center, normal, P=pos)
         pos = pos.T
         return pos[0]
@@ -486,7 +480,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos = modify_reference_frame(center, normal, P=pos)
         pos = pos.T
         return pos[1]
@@ -509,7 +503,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"]).T
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos = modify_reference_frame(center, normal, P=pos)
         pos = pos.T
         return pos[2]
@@ -531,7 +525,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+        vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         return vel
 
@@ -553,7 +547,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+        vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         vel = vel.T
         return vel[0]
@@ -576,7 +570,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter('bulk_velocity')
-        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+        vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         vel = vel.T
         return vel[1]
@@ -599,7 +593,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        vel = YTArray([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+        vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         vel = vel.T
         return vel[2]
@@ -636,7 +630,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter("center")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_sph_theta(pos, normal), "")
 
@@ -665,7 +659,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter("center")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_sph_phi(pos, normal), "")
 
@@ -696,8 +690,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
-        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -739,8 +733,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
-        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -774,8 +768,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
-        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
@@ -807,7 +801,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_cyl_r(pos, normal),
                            'code_length')
@@ -827,7 +821,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_cyl_theta(pos, normal), "")
 
@@ -846,7 +840,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_cyl_z(pos, normal),
                            'code_length')
@@ -867,8 +861,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
-        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_cyl_theta(pos, center)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
@@ -891,8 +885,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
-        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_cyl_theta(pos, center)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
@@ -925,8 +919,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
-        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         cylz = get_cyl_z_component(vel, normal)
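
The switch from the bare YTArray constructor to data.ds.arr is not cosmetic:
ds.arr builds the array with that dataset's unit registry, so code units such
as "code_length" resolve to the dataset's own conversion factors. Roughly
(ds stands for any loaded dataset):

    # Tied to ds's unit registry; "code_length" means this dataset's code length.
    pos = ds.arr([0.5, 0.5, 0.5], "code_length")

    # Bare constructor: fine for plain cgs units, but it knows nothing about ds.
    from yt.units.yt_array import YTArray
    vel = YTArray([1.0, 0.0, 0.0], "cm/s")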


https://bitbucket.org/yt_analysis/yt/commits/fdbb670cae98/
Changeset:   fdbb670cae98
Branch:      yt
User:        Ben Thompson
Date:        2015-05-12 21:55:21+00:00
Summary:     fixed a few bugs; each field should now return the correct units
Affected #:  2 files

diff -r b54b4413b1176fe400ac5a39090d6ae2a434b759 -r fdbb670cae984dbff3a8b53079b32debbbadb2f5 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -23,7 +23,8 @@
     ValidateSpatial
 
 from yt.units.yt_array import \
-    uconcatenate
+    uconcatenate, \
+    ucross
 
 from yt.utilities.math_utils import \
     get_sph_r_component, \
@@ -280,7 +281,7 @@
         center = data.get_field_parameter('center')
         pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
         L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)
-        return data.ds.arr(np.cross(r_vec.in_units("cm"), v_vec.in_units("cm/s")),"cm**2/s")
+        return ucross(r_vec, v_vec)
 
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
@@ -367,7 +368,7 @@
               validators=[ValidateParameter("center")])
 
     create_magnitude_field(registry, "particle_specific_angular_momentum",
-                           "cm**2/s", ftype=ptype, particle_type=True)
+                           "code_length**2/s", ftype=ptype, particle_type=True)
 
     def _particle_angular_momentum_x(field, data):
         return data[ptype, "particle_mass"] * \
@@ -397,8 +398,8 @@
              validators=[ValidateParameter('center')])
 
     def _particle_angular_momentum(field, data):
-        return data[ptype, "particle_mass"] \
-            * data[ptype, "particle_specific_angular_momentum"]
+        return np.multiply(data[ptype, "particle_mass"], 
+             data[ptype, "particle_specific_angular_momentum"].T).T
 
 
     registry.add_field((ptype, "particle_angular_momentum"),

diff -r b54b4413b1176fe400ac5a39090d6ae2a434b759 -r fdbb670cae984dbff3a8b53079b32debbbadb2f5 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1224,6 +1224,12 @@
     v = validate_numpy_wrapper_units(v, arrs)
     return v
 
+def ucross(arr1,arr2):
+   v = np.cross(arr1,arr2)
+   units = arr1.units * arr2.units
+   arr = YTArray(v,units)
+   return arr
+
 def uintersect1d(arr1, arr2, assume_unique=False):
     """Find the sorted unique elements of the two input arrays.
 
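
With ucross available, the manual unit bookkeeping from the earlier commits
reduces to a single call; a short usage sketch:

    from yt.units.yt_array import YTArray, ucross

    r = YTArray([[1.0, 0.0, 0.0]], "cm")
    v = YTArray([[0.0, 1.0, 0.0]], "cm/s")

    L = ucross(r, v)    # np.cross plus units: the result carries cm**2/s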


https://bitbucket.org/yt_analysis/yt/commits/5f0320ab4769/
Changeset:   5f0320ab4769
Branch:      yt
User:        Ben Thompson
Date:        2015-05-12 23:40:37+00:00
Summary:     changed the angular momentum output slightly
Affected #:  1 file

diff -r fdbb670cae984dbff3a8b53079b32debbbadb2f5 -r 5f0320ab4769d99825b58e49f9620c38ff494fa4 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -368,7 +368,7 @@
               validators=[ValidateParameter("center")])
 
     create_magnitude_field(registry, "particle_specific_angular_momentum",
-                           "code_length**2/s", ftype=ptype, particle_type=True)
+                           "cm**2/s", ftype=ptype, particle_type=True)
 
     def _particle_angular_momentum_x(field, data):
         return data[ptype, "particle_mass"] * \
@@ -398,8 +398,8 @@
              validators=[ValidateParameter('center')])
 
     def _particle_angular_momentum(field, data):
-        return np.multiply(data[ptype, "particle_mass"], 
-             data[ptype, "particle_specific_angular_momentum"].T).T
+	am = data[ptype, "particle_mass"] * data[ptype, "particle_specific_angular_momentum"].T
+	return am.T
 
 
     registry.add_field((ptype, "particle_angular_momentum"),


https://bitbucket.org/yt_analysis/yt/commits/c9f2b5ee8c12/
Changeset:   c9f2b5ee8c12
Branch:      yt
User:        Ben Thompson
Date:        2015-05-21 21:01:59+00:00
Summary:     get_cyl_z now returns an absolute value, not a negative value.
If you want a non-absolute (i.e. possibly negative) value, use particle_position_relative_z,
where abs(particle_position_relative_z) should equal particle_position_cylindrical_z
Affected #:  1 file

diff -r 5f0320ab4769d99825b58e49f9620c38ff494fa4 -r c9f2b5ee8c1244b7a7e546b11fe6e275b21033b9 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -908,7 +908,8 @@
     tile_shape = [1] + list(coords.shape)[1:]
     J = np.tile(res_normal, tile_shape)
 
-    return np.sum(J*coords, axis=0)  
+    # returns the absolute value
+    return np.abs(np.sum(J*coords, axis=0))  
 
 def get_cyl_theta(coords, normal):
     # This is identical to the spherical phi component
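
For reference, get_cyl_z computes the signed projection of each
center-relative position onto the unit normal, so taking the absolute value
discards which side of the midplane a point lies on. A standalone numpy
illustration of the projection in the hunk above:

    import numpy as np

    normal = np.array([0.0, 0.0, 1.0])
    coords = np.array([[0.1], [0.2], [-0.3]])         # one point, shape (3, 1)

    cyl_z = np.sum(normal[:, None] * coords, axis=0)  # signed: [-0.3]
    print(np.abs(cyl_z))                              # unsigned: [0.3]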


https://bitbucket.org/yt_analysis/yt/commits/26b3822f61f0/
Changeset:   26b3822f61f0
Branch:      yt
User:        Ben Thompson
Date:        2015-05-21 21:08:07+00:00
Summary:     moved the absolute value from math_utils to the particle_position_cylindrical_z field in particle_fields
Affected #:  2 files

diff -r c9f2b5ee8c1244b7a7e546b11fe6e275b21033b9 -r 26b3822f61f0529afce0af09bda1c48422edfc0c yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -843,7 +843,7 @@
         center = data.get_field_parameter('center')
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
-        return data.ds.arr(get_cyl_z(pos, normal),
+        return data.ds.arr(np.abs(get_cyl_z(pos, normal)),
                            'code_length')
 
     registry.add_field(

diff -r c9f2b5ee8c1244b7a7e546b11fe6e275b21033b9 -r 26b3822f61f0529afce0af09bda1c48422edfc0c yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -909,7 +909,7 @@
     J = np.tile(res_normal, tile_shape)
 
     # returns the absolute value
-    return np.abs(np.sum(J*coords, axis=0))  
+    return np.sum(J*coords, axis=0)  
 
 def get_cyl_theta(coords, normal):
     # This is identical to the spherical phi component


https://bitbucket.org/yt_analysis/yt/commits/9b6a853e5cd1/
Changeset:   9b6a853e5cd1
Branch:      yt
User:        Ben Thompson
Date:        2015-05-21 21:13:42+00:00
Summary:     undid the changes made today
Affected #:  1 file

diff -r 26b3822f61f0529afce0af09bda1c48422edfc0c -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -843,7 +843,7 @@
         center = data.get_field_parameter('center')
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
-        return data.ds.arr(np.abs(get_cyl_z(pos, normal)),
+        return data.ds.arr(get_cyl_z(pos, normal),
                            'code_length')
 
     registry.add_field(


https://bitbucket.org/yt_analysis/yt/commits/c85e81b8cf95/
Changeset:   c85e81b8cf95
Branch:      yt
User:        cosmosquark
Date:        2015-05-28 15:39:27+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  24 files

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -13,6 +13,7 @@
 yt/frontends/ramses/_ramses_reader.cpp
 yt/geometry/fake_octree.c
 yt/geometry/grid_container.c
+yt/geometry/grid_visitors.c
 yt/geometry/oct_container.c
 yt/geometry/oct_visitors.c
 yt/geometry/particle_deposit.c
@@ -25,6 +26,7 @@
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/bitarray.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
@@ -39,6 +41,7 @@
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
 yt/utilities/lib/origami.c
+yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
@@ -59,3 +62,4 @@
 doc/source/reference/api/generated/*
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
+dist

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd .python-version
--- /dev/null
+++ b/.python-version
@@ -0,0 +1,1 @@
+2.7.9

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd README
--- a/README
+++ b/README
@@ -20,4 +20,4 @@
 For more information on installation, what to do if you run into problems, or 
 ways to help development, please visit our website.
 
-Enjoy!
+Enjoy!
\ No newline at end of file

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd doc/source/analyzing/time_series_analysis.rst
--- a/doc/source/analyzing/time_series_analysis.rst
+++ b/doc/source/analyzing/time_series_analysis.rst
@@ -79,9 +79,7 @@
 Analyzing an Entire Simulation
 ------------------------------
 
-.. note:: Currently only implemented for Enzo.  Other simulation types coming 
-   soon.  Until then, rely on the above prescription for creating 
-   ``DatasetSeries`` objects.
+.. note:: Implemented for: Enzo, Gadget, OWLS.
 
 The parameter file used to run a simulation contains all the information 
 necessary to know what datasets should be available.  The ``simulation`` 
@@ -93,8 +91,7 @@
 .. code-block:: python
 
   import yt
-  my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo',
-                         find_outputs=False)
+  my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
 
 Then, create a ``DatasetSeries`` object with the 
 :meth:`frontends.enzo.simulation_handling.EnzoSimulation.get_time_series` 
@@ -123,10 +120,10 @@
 to select a subset of the total data:
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when 
-  gathering datasets for time series.  Default: True.
+  gathering datasets for time series.  Default: True.  (Enzo only)
 
 * ``redshift_data`` (*bool*): Whether or not to include redshift outputs 
-  when gathering datasets for time series.  Default: True.
+  when gathering datasets for time series.  Default: True.  (Enzo only)
 
 * ``initial_time`` (*float*): The earliest time for outputs to be included.  
   If None, the initial time of the simulation is used.  This can be used in 
@@ -139,15 +136,12 @@
 * ``times`` (*list*): A list of times for which outputs will be found.
   Default: None.
 
-* ``time_units`` (*str*): The time units used for requesting outputs by time.
-  Default: '1' (code units).
-
 * ``initial_redshift`` (*float*): The earliest redshift for outputs to be 
   included.  If None, the initial redshift of the simulation is used.  This
   can be used in combination with either ``final_time`` or ``final_redshift``.
   Default: None.
 
-* ``final_time`` (*float*): The latest redshift for outputs to be included.  
+* ``final_redshift`` (*float*): The latest redshift for outputs to be included.  
   If None, the final redshift of the simulation is used.  This can be used 
   in combination with either ``initial_time`` or ``initial_redshift``.  
   Default: None.
@@ -157,11 +151,11 @@
 
 * ``initial_cycle`` (*float*): The earliest cycle for outputs to be 
   included.  If None, the initial cycle of the simulation is used.  This can
-  only be used with final_cycle.  Default: None.
+  only be used with final_cycle.  Default: None.  (Enzo only)
 
 * ``final_cycle`` (*float*): The latest cycle for outputs to be included.  
   If None, the final cycle of the simulation is used.  This can only be used 
-  in combination with initial_cycle.  Default: None.
+  in combination with initial_cycle.  Default: None.  (Enzo only)
 
 * ``tolerance`` (*float*):  Used in combination with ``times`` or ``redshifts`` 
   keywords, this is the tolerance within which outputs are accepted given 

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -635,13 +635,14 @@
    import yt
    ds = yt.load("snapshot_061.hdf5")
 
-However, yt cannot detect raw-binary Gadget data, and so you must specify the
-format as being Gadget:
+Gadget data in raw binary format can also be loaded with the ``load`` command. 
+This is only supported for snapshots created with the ``SnapFormat`` parameter 
+set to 1 (the standard for Gadget-2).
 
 .. code-block:: python
 
    import yt
-   ds = yt.GadgetDataset("snapshot_061")
+   ds = yt.load("snapshot_061")
 
 .. _particle-bbox:
 

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -213,10 +213,31 @@
 ++++++++++++++++++++++++++++++++++++++
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.  These include: a C compiler, ``HDF5``, ``python``,
-``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
-can use ``pip`` (which comes with ``Python``) to install the latest stable
-version of yt:
+installed on your system. 
+
+If you use a Linux OS, use your distro's package manager to install these yt
+dependencies on your system:
+
+- ``HDF5``
+- ``zeromq``
+- ``sqlite`` 
+- ``mercurial``
+
+Then install the required Python packages with ``pip``:
+
+.. code-block:: bash
+
+  $ pip install -r requirements.txt
+
+If you're using IPython notebooks, you can install its dependencies
+with ``pip`` as well:
+
+.. code-block:: bash
+
+  $ pip install -r optional-requirements.txt
+
+From here, you can use ``pip`` (which comes with ``Python``) to install the latest
+stable version of yt:
 
 .. code-block:: bash
 

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd optional-requirements.txt
--- /dev/null
+++ b/optional-requirements.txt
@@ -0,0 +1,1 @@
+ipython[notebook]
\ No newline at end of file

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd requirements.txt
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+numpy==1.9.2 
+matplotlib==1.4.3 
+Cython==0.22 
+h5py==2.5.0 
+nose==1.3.6 
+sympy==0.7.6 

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -127,8 +127,8 @@
         field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
         field_data = {}
         if use_peculiar_velocity:
-            input_fields.append('los_velocity')
-            field_units["los_velocity"] = "cm/s"
+            input_fields.append('velocity_los')
+            field_units["velocity_los"] = "cm/s"
         for feature in self.line_list + self.continuum_list:
             if not feature['field_name'] in input_fields:
                 input_fields.append(feature['field_name'])
@@ -171,7 +171,7 @@
             if use_peculiar_velocity:
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
-                    field_data['los_velocity'] / speed_of_light_cgs
+                    field_data['velocity_los'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
@@ -208,7 +208,7 @@
             if use_peculiar_velocity:
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
-                    field_data['los_velocity'] / speed_of_light_cgs
+                    field_data['velocity_los'] / speed_of_light_cgs
             thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['temperature']) /
                                             (amu_cgs * line['atomic_mass']))
@@ -260,7 +260,7 @@
                 if line['label_threshold'] is not None and \
                         column_density[lixel] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = km_per_cm * field_data['los_velocity'][lixel]
+                        peculiar_velocity = km_per_cm * field_data['velocity_los'][lixel]
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -33,6 +33,13 @@
 
 class LightRay(CosmologySplice):
     """
+    LightRay(parameter_filename, simulation_type=None,
+             near_redshift=None, far_redshift=None,
+             use_minimum_datasets=True, deltaz_min=0.0,
+             minimum_coherent_box_fraction=0.0,
+             time_data=True, redshift_data=True,
+             find_outputs=False, load_kwargs=None):
+
     Create a LightRay object.  A light ray is much like a light cone,
     in that it stacks together multiple datasets in order to extend a
     redshift interval.  Unlike a light cone, which does randomly
@@ -94,6 +101,12 @@
         Whether or not to search for datasets in the current 
         directory.
         Default: False.
+    load_kwargs : optional, dict
+        Optional dictionary of kwargs to be passed to the "load" 
+        function, appropriate for use of certain frontends.  E.g.
+        Tipsy using "bounding_box"
+        Gadget using "unit_base", etc.
+        Default : None
 
     """
     def __init__(self, parameter_filename, simulation_type=None,
@@ -101,7 +114,7 @@
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
-                 find_outputs=False):
+                 find_outputs=False, load_kwargs=None):
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
@@ -109,13 +122,16 @@
         self.deltaz_min = deltaz_min
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
         self.parameter_filename = parameter_filename
-
+        if load_kwargs is None:
+            self.load_kwargs = {}
+        else:
+            self.load_kwargs = load_kwargs
         self.light_ray_solution = []
         self._data = {}
 
         # Make a light ray from a single, given dataset.        
         if simulation_type is None:
-            ds = load(parameter_filename)
+            ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
                 self.cosmology = Cosmology(
@@ -243,6 +259,12 @@
                        get_los_velocity=True, redshift=None,
                        njobs=-1):
         """
+        make_light_ray(seed=None, start_position=None, end_position=None,
+                       trajectory=None, fields=None, setup_function=None,
+                       solution_filename=None, data_filename=None,
+                       get_los_velocity=True, redshift=None,
+                       njobs=-1)
+
         Create a light ray and get field values for each lixel.  A light
         ray consists of a list of field values for cells intersected by
         the ray and the path length of the ray through those cells.
@@ -343,9 +365,9 @@
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_los_velocity:
-            all_fields.extend(['x-velocity', 'y-velocity',
-                               'z-velocity', 'los_velocity'])
-            data_fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
+            all_fields.extend(['velocity_x', 'velocity_y',
+                               'velocity_z', 'velocity_los'])
+            data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
@@ -353,7 +375,7 @@
                                                        njobs=njobs):
 
             # Load dataset for segment.
-            ds = load(my_segment['filename'])
+            ds = load(my_segment['filename'], **self.load_kwargs)
 
             my_segment['unique_identifier'] = ds.unique_identifier
             if redshift is not None:
@@ -364,11 +386,15 @@
 
             if setup_function is not None:
                 setup_function(ds)
-            
-            my_segment["start"] = ds.domain_width * my_segment["start"] + \
-                ds.domain_left_edge
-            my_segment["end"] = ds.domain_width * my_segment["end"] + \
-                ds.domain_left_edge
+
+            if start_position is not None:
+                my_segment["start"] = ds.arr(my_segment["start"], "code_length")
+                my_segment["end"] = ds.arr(my_segment["end"], "code_length")
+            else:
+                my_segment["start"] = ds.domain_width * my_segment["start"] + \
+                  ds.domain_left_edge
+                my_segment["end"] = ds.domain_width * my_segment["end"] + \
+                  ds.domain_left_edge
 
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
@@ -412,10 +438,10 @@
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = ds.arr([sub_ray['x-velocity'],
-                                      sub_ray['y-velocity'],
-                                      sub_ray['z-velocity']])
-                    sub_data['los_velocity'].extend((np.rollaxis(sub_vel, 1) *
+                    sub_vel = ds.arr([sub_ray['velocity_x'],
+                                      sub_ray['velocity_y'],
+                                      sub_ray['velocity_z']])
+                    sub_data['velocity_los'].extend((np.rollaxis(sub_vel, 1) *
                                                      line_of_sight).sum(axis=1)[asort])
                     del sub_vel
 
@@ -423,7 +449,6 @@
                 del sub_ray, asort
 
             for key in sub_data:
-                if key in "xyz": continue
                 sub_data[key] = ds.arr(sub_data[key]).in_cgs()
 
             # Get redshift for each lixel.  Assume linear relation between l and z.
@@ -461,18 +486,32 @@
 
     @parallel_root_only
     def _write_light_ray(self, filename, data):
-        "Write light ray data to hdf5 file."
+        """
+        _write_light_ray(filename, data)
+
+        Write light ray data to hdf5 file.
+        """
 
         mylog.info("Saving light ray data to %s." % filename)
         output = h5py.File(filename, 'w')
         for field in data.keys():
-            output.create_dataset(field, data=data[field])
-            output[field].attrs["units"] = str(data[field].units)
+            # if the field is a tuple, only use the second part of the tuple
+            # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
+            if isinstance(field, tuple):
+                fieldname = field[1]
+            else:
+                fieldname = field
+            output.create_dataset(fieldname, data=data[field])
+            output[fieldname].attrs["units"] = str(data[field].units)
         output.close()
 
     @parallel_root_only
     def _write_light_ray_solution(self, filename, extra_info=None):
-        "Write light ray solution to a file."
+        """
+        _write_light_ray_solution(filename, extra_info=None)
+
+        Write light ray solution to a file.
+        """
 
         mylog.info("Writing light ray solution to %s." % filename)
         f = open(filename, 'w')
@@ -490,7 +529,11 @@
         f.close()
 
 def _flatten_dict_list(data, exceptions=None):
-    "Flatten the list of dicts into one dict."
+    """
+    _flatten_dict_list(data, exceptions=None)
+
+    Flatten the list of dicts into one dict.
+    """
 
     if exceptions is None: exceptions = []
     new_data = {}
@@ -505,12 +548,20 @@
     return new_data
 
 def vector_length(start, end):
-    "Calculate vector length."
+    """
+    vector_length(start, end)
+    
+    Calculate vector length.
+    """
 
     return np.sqrt(np.power((end - start), 2).sum())
 
 def periodic_distance(coord1, coord2):
-    "Calculate length of shortest vector between to points in periodic domain."
+    """
+    periodic_distance(coord1, coord2)
+
+    Calculate length of shortest vector between two points in periodic domain.
+    """
     dif = coord1 - coord2
 
     dim = np.ones(coord1.shape,dtype=int)
@@ -524,6 +575,8 @@
 
 def periodic_ray(start, end, left=None, right=None):
     """
+    periodic_ray(start, end, left=None, right=None)
+
     Break up periodic ray into non-periodic segments. 
     Accepts start and end points of periodic ray as YTArrays.
     Accepts optional left and right edges of periodic volume as YTArrays.
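
The light ray hunks above move to the velocity_x/velocity_y/velocity_z field names and project each cell's velocity onto the ray direction. A minimal sketch of that line-of-sight projection, with plain NumPy arrays standing in for YTArrays and hypothetical values (the patched block carries the components as a (3, N) array, hence its np.rollaxis):

import numpy as np

# Hypothetical ray endpoints and per-cell velocities (one row per cell).
start = np.array([0.0, 0.0, 0.0])
end = np.array([1.0, 1.0, 0.0])
vel = np.array([[1.0e5, 2.0e5, 0.0],
                [3.0e5, 0.0, 0.0]])

# Unit vector along the ray, as in the patched block.
line_of_sight = end - start
line_of_sight /= np.sqrt((line_of_sight ** 2).sum())

# velocity_los is the dot product of each velocity with the ray direction.
velocity_los = (vel * line_of_sight).sum(axis=1)
print(velocity_los)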

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -406,6 +406,7 @@
         self.basename = os.path.basename(parameter_filename)
         self.directory = os.path.dirname(parameter_filename)
         self.parameters = {}
+        self.key_parameters = []
 
         # Set some parameter defaults.
         self._set_parameter_defaults()
@@ -420,6 +421,21 @@
         
         self.print_key_parameters()
 
+    def _set_parameter_defaults(self):
+        pass
+
+    def _parse_parameter_file(self):
+        pass
+
+    def _set_units(self):
+        pass
+
+    def _calculate_simulation_bounds(self):
+        pass
+
+    def _get_all_outputs(self, **kwargs):
+        pass
+        
     def __repr__(self):
         return self.parameter_filename
 
@@ -445,23 +461,78 @@
         """
         Print out some key parameters for the simulation.
         """
-        for a in ["domain_dimensions", "domain_left_edge",
-                  "domain_right_edge", "initial_time", "final_time",
-                  "stop_cycle", "cosmological_simulation"]:
-            if not hasattr(self, a):
-                mylog.error("Missing %s in dataset definition!", a)
-                continue
-            v = getattr(self, a)
-            mylog.info("Parameters: %-25s = %s", a, v)
-        if hasattr(self, "cosmological_simulation") and \
-           getattr(self, "cosmological_simulation"):
+        if self.simulation_type == "grid":
+            for a in ["domain_dimensions", "domain_left_edge",
+                      "domain_right_edge"]:
+                self._print_attr(a)
+        for a in ["initial_time", "final_time",
+                  "cosmological_simulation"]:
+            self._print_attr(a)
+        if getattr(self, "cosmological_simulation", False):
             for a in ["box_size", "omega_lambda",
                       "omega_matter", "hubble_constant",
                       "initial_redshift", "final_redshift"]:
-                if not hasattr(self, a):
-                    mylog.error("Missing %s in dataset definition!", a)
-                    continue
-                v = getattr(self, a)
-                mylog.info("Parameters: %-25s = %s", a, v)
+                self._print_attr(a)
+        for a in self.key_parameters:
+            self._print_attr(a)
         mylog.info("Total datasets: %d." % len(self.all_outputs))
 
+    def _print_attr(self, a):
+        """
+        Print the attribute or warn about it missing.
+        """
+        if not hasattr(self, a):
+            mylog.error("Missing %s in dataset definition!", a)
+            return
+        v = getattr(self, a)
+        mylog.info("Parameters: %-25s = %s", a, v)
+
+    def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
+        r"""
+        Get datasets at or near to given values.
+
+        Parameters
+        ----------
+        key: str
+            The key by which to retrieve outputs, usually 'time' or
+            'redshift'.
+        values: array_like
+            A list of values, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the value is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+
+        Examples
+        --------
+        >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
+
+        """
+
+        if not isinstance(values, YTArray):
+            if isinstance(values, tuple) and len(values) == 2:
+                values = self.arr(*values)
+            else:
+                values = self.arr(values)
+        values = values.in_cgs()
+
+        if outputs is None:
+            outputs = self.all_outputs
+        my_outputs = []
+        if not outputs:
+            return my_outputs
+        for value in values:
+            outputs.sort(key=lambda obj:np.abs(value - obj[key]))
+            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
+                    and outputs[0] not in my_outputs:
+                my_outputs.append(outputs[0])
+            else:
+                mylog.error("No dataset added for %s = %f.", key, value)
+
+        outputs.sort(key=lambda obj: obj['time'])
+        return my_outputs
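
The _get_outputs_by_key method moved into the base class above generalizes the old by-time/by-redshift helpers. A standalone sketch of the nearest-match selection it performs, using plain floats in place of YTArrays and skipping the mylog error call:

import numpy as np

def get_outputs_by_key(key, values, outputs, tolerance=None):
    # Sort the outputs by distance to each requested value and keep the
    # nearest one, provided it lies within tolerance (or unconditionally
    # if tolerance is None) and has not already been chosen.
    my_outputs = []
    for value in values:
        outputs.sort(key=lambda obj: np.abs(value - obj[key]))
        nearest = outputs[0]
        if (tolerance is None or np.abs(value - nearest[key]) <= tolerance) \
                and nearest not in my_outputs:
            my_outputs.append(nearest)
    outputs.sort(key=lambda obj: obj["time"])
    return my_outputs

outputs = [{"time": 0.0, "redshift": 9.0},
           {"time": 1.0, "redshift": 3.0},
           {"time": 2.0, "redshift": 1.0}]
# z = 1.1 matches within tolerance; z = 8.0 misses it and is dropped.
print(get_outputs_by_key("redshift", [1.1, 8.0], outputs, tolerance=0.5))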

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -808,8 +808,10 @@
         # set the periodicity based on the runtime parameters
         periodicity = [True, True, True]
         if not self.parameters['-x'] == "interior": periodicity[0] = False
-        if not self.parameters['-y'] == "interior": periodicity[1] = False
-        if not self.parameters['-z'] == "interior": periodicity[2] = False
+        if self.dimensionality >= 2:
+            if not self.parameters['-y'] == "interior": periodicity[1] = False
+        if self.dimensionality == 3:
+            if not self.parameters['-z'] == "interior": periodicity[2] = False
 
         self.periodicity = ensure_tuple(periodicity)
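
A small sketch of the guarded periodicity setup above, with a hypothetical 2D parameters dict; the short-circuit guards keep the '-y' and '-z' keys from being read in runs that do not define them:

parameters = {'-x': 'interior', '-y': 'outflow'}  # hypothetical 2D run
dimensionality = 2

periodicity = [True, True, True]
if parameters['-x'] != 'interior':
    periodicity[0] = False
if dimensionality >= 2 and parameters['-y'] != 'interior':
    periodicity[1] = False
if dimensionality == 3 and parameters['-z'] != 'interior':
    periodicity[2] = False
print(tuple(periodicity))  # (True, False, True)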
     

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -185,7 +185,7 @@
                     element, weight = field[2:4], field[4:-1]
                 else:
                     element, weight = field[2:3], field[3:-1]
-                weight = int(weight)
+
                 # Here we can, later, add number density.
 
 

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -13,36 +13,34 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.funcs import *
-
 import numpy as np
 import glob
 import os
 
 from yt.convenience import \
-    load
+    load, \
+    only_on_root
 from yt.data_objects.time_series import \
     SimulationTimeSeries, DatasetSeries
 from yt.units import dimensions
 from yt.units.unit_registry import \
-     UnitRegistry
+    UnitRegistry
 from yt.units.yt_array import \
-     YTArray, YTQuantity
+    YTArray, YTQuantity
 from yt.utilities.cosmology import \
     Cosmology
-from yt.utilities.definitions import \
-    sec_conversion
 from yt.utilities.exceptions import \
     InvalidSimulationTimeSeries, \
     MissingParameter, \
     NoStoppingCondition
+from yt.utilities.logger import ytLogger as \
+    mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
-from yt.utilities.physical_constants import \
-    gravitational_constant_cgs as G
-
+    
 class EnzoSimulation(SimulationTimeSeries):
-    r"""Initialize an Enzo Simulation object.
+    r"""
+    Initialize an Enzo Simulation object.
 
     Upon creation, the parameter file is parsed and the time and redshift
     are calculated and stored in all_outputs.  A time units dictionary is
@@ -63,14 +61,8 @@
 
     Examples
     --------
-    >>> from yt.mods import *
-    >>> es = EnzoSimulation("my_simulation.par")
-    >>> es.get_time_series()
-    >>> for ds in es:
-    ...     print ds.current_time
-
-    >>> from yt.mods import *
-    >>> es = simulation("my_simulation.par", "Enzo")
+    >>> import yt
+    >>> es = yt.simulation("my_simulation.par", "Enzo")
     >>> es.get_time_series()
     >>> for ds in es:
     ...     print ds.current_time
@@ -78,7 +70,8 @@
     """
 
     def __init__(self, parameter_filename, find_outputs=False):
-
+        self.simulation_type = "grid"
+        self.key_parameters = ["stop_cycle"]
         SimulationTimeSeries.__init__(self, parameter_filename,
                                       find_outputs=find_outputs)
 
@@ -87,14 +80,14 @@
         self.unit_registry.lut["code_time"] = (1.0, dimensions.time)
         if self.cosmological_simulation:
             # Instantiate EnzoCosmology object for units and time conversions.
-            self.enzo_cosmology = \
+            self.cosmology = \
               EnzoCosmology(self.parameters['CosmologyHubbleConstantNow'],
                             self.parameters['CosmologyOmegaMatterNow'],
                             self.parameters['CosmologyOmegaLambdaNow'],
                             0.0, self.parameters['CosmologyInitialRedshift'],
                             unit_registry=self.unit_registry)
 
-            self.time_unit = self.enzo_cosmology.time_unit.in_units("s")
+            self.time_unit = self.cosmology.time_unit.in_units("s")
             self.unit_registry.modify("h", self.hubble_constant)
             # Comoving lengths
             for my_unit in ["m", "pc", "AU", "au"]:
@@ -160,7 +153,7 @@
             used in combination with either final_time or
             final_redshift.
             Default: None.
-        final_time : float
+        final_redshift : float
             The latest redshift for outputs to be included.  If None,
             the final redshift of the simulation is used.  This can be
             used in combination with either initial_time or
@@ -197,8 +190,8 @@
         Examples
         --------
 
-        >>> from yt.mods import *
-        >>> es = simulation("my_simulation.par", "Enzo")
+        >>> import yt
+        >>> es = yt.simulation("my_simulation.par", "Enzo")
         
         >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), 
                                redshift_data=False)
@@ -207,8 +200,6 @@
 
         >>> es.get_time_series(final_cycle=100000)
 
-        >>> es.get_time_series(find_outputs=True)
-
         >>> # after calling get_time_series
         >>> for ds in es.piter():
         ...     p = ProjectionPlot(ds, 'x', "density")
@@ -226,7 +217,9 @@
         if (initial_redshift is not None or \
             final_redshift is not None) and \
             not self.cosmological_simulation:
-            raise InvalidSimulationTimeSeries('An initial or final redshift has been given for a noncosmological simulation.')
+            raise InvalidSimulationTimeSeries(
+                "An initial or final redshift has been given for a " +
+                "noncosmological simulation.")
 
         if time_data and redshift_data:
             my_all_outputs = self.all_outputs
@@ -244,12 +237,14 @@
 
         # Apply selection criteria to the set.
         if times is not None:
-            my_outputs = self._get_outputs_by_time(times, tolerance=tolerance,
-                                                   outputs=my_all_outputs)
+            my_outputs = self._get_outputs_by_key("time", times,
+                                                  tolerance=tolerance,
+                                                  outputs=my_all_outputs)
 
         elif redshifts is not None:
-            my_outputs = self._get_outputs_by_redshift(redshifts, tolerance=tolerance,
-                                                       outputs=my_all_outputs)
+            my_outputs = self._get_outputs_by_key("redshift", redshifts,
+                                                  tolerance=tolerance,
+                                                  outputs=my_all_outputs)
 
         elif initial_cycle is not None or final_cycle is not None:
             if initial_cycle is None:
@@ -272,9 +267,11 @@
                 elif isinstance(initial_time, tuple) and len(initial_time) == 2:
                     initial_time = self.quan(*initial_time)
                 elif not isinstance(initial_time, YTArray):
-                    raise RuntimeError("Error: initial_time must be given as a float or tuple of (value, units).")
+                    raise RuntimeError(
+                        "Error: initial_time must be given as a float or " +
+                        "tuple of (value, units).")
             elif initial_redshift is not None:
-                my_initial_time = self.enzo_cosmology.t_from_z(initial_redshift)
+                my_initial_time = self.cosmology.t_from_z(initial_redshift)
             else:
                 my_initial_time = self.initial_time
 
@@ -284,10 +281,12 @@
                 elif isinstance(final_time, tuple) and len(final_time) == 2:
                     final_time = self.quan(*final_time)
                 elif not isinstance(final_time, YTArray):
-                    raise RuntimeError("Error: final_time must be given as a float or tuple of (value, units).")
+                    raise RuntimeError(
+                        "Error: final_time must be given as a float or " +
+                        "tuple of (value, units).")
                 my_final_time = final_time.in_units("s")
             elif final_redshift is not None:
-                my_final_time = self.enzo_cosmology.t_from_z(final_redshift)
+                my_final_time = self.cosmology.t_from_z(final_redshift)
             else:
                 my_final_time = self.final_time
 
@@ -390,8 +389,9 @@
                     raise MissingParameter(self.parameter_filename, v)
                 setattr(self, a, self.parameters[v])
         else:
+            self.cosmological_simulation = 0
             self.omega_lambda = self.omega_matter = \
-                self.hubble_constant = self.cosmological_simulation = 0.0
+                self.hubble_constant = 0.0
 
         # make list of redshift outputs
         self.all_redshift_outputs = []
@@ -405,16 +405,10 @@
             del output['index']
         self.all_redshift_outputs = redshift_outputs
 
-    def _calculate_redshift_dump_times(self):
-        "Calculates time from redshift of redshift outputs."
-
-        if not self.cosmological_simulation: return
-        for output in self.all_redshift_outputs:
-            output['time'] = self.enzo_cosmology.t_from_z(output['redshift'])
-        self.all_redshift_outputs.sort(key=lambda obj:obj['time'])
-
     def _calculate_time_outputs(self):
-        "Calculate time outputs and their redshifts if cosmological."
+        """
+        Calculate time outputs and their redshifts if cosmological.
+        """
 
         self.all_time_outputs = []
         if self.final_time is None or \
@@ -432,7 +426,7 @@
             output = {'index': index, 'filename': filename, 'time': current_time.copy()}
             output['time'] = min(output['time'], self.final_time)
             if self.cosmological_simulation:
-                output['redshift'] = self.enzo_cosmology.z_from_t(current_time)
+                output['redshift'] = self.cosmology.z_from_t(current_time)
 
             self.all_time_outputs.append(output)
             if np.abs(self.final_time - current_time) / self.final_time < 1e-4: break
@@ -440,7 +434,9 @@
             index += 1
 
     def _calculate_cycle_outputs(self):
-        "Calculate cycle outputs."
+        """
+        Calculate cycle outputs.
+        """
 
         mylog.warn('Calculating cycle outputs.  Dataset times will be unavailable.')
 
@@ -460,7 +456,9 @@
             index += 1
 
     def _get_all_outputs(self, find_outputs=False):
-        "Get all potential datasets and combine into a time-sorted list."
+        """
+        Get all potential datasets and combine into a time-sorted list.
+        """
 
         # Create the set of outputs from which further selection will be done.
         if find_outputs:
@@ -468,8 +466,12 @@
 
         elif self.parameters['dtDataDump'] > 0 and \
           self.parameters['CycleSkipDataDump'] > 0:
-            mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set.", self.parameter_filename )
-            mylog.info("    Unable to calculate datasets.  Attempting to search in the current directory")
+            mylog.info(
+                "Simulation %s has both dtDataDump and CycleSkipDataDump set.",
+                self.parameter_filename )
+            mylog.info(
+                "    Unable to calculate datasets.  " +
+                "Attempting to search in the current directory")
             self._find_outputs()
 
         else:
@@ -480,7 +482,10 @@
                 self._calculate_time_outputs()
 
             # Calculate times for redshift outputs.
-            self._calculate_redshift_dump_times()
+            if self.cosmological_simulation:
+                for output in self.all_redshift_outputs:
+                    output["time"] = self.cosmology.t_from_z(output["redshift"])
+                self.all_redshift_outputs.sort(key=lambda obj:obj["time"])
 
             self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
             if self.parameters['CycleSkipDataDump'] <= 0:
@@ -496,9 +501,9 @@
 
         # Convert initial/final redshifts to times.
         if self.cosmological_simulation:
-            self.initial_time = self.enzo_cosmology.t_from_z(self.initial_redshift)
+            self.initial_time = self.cosmology.t_from_z(self.initial_redshift)
             self.initial_time.units.registry = self.unit_registry
-            self.final_time = self.enzo_cosmology.t_from_z(self.final_redshift)
+            self.final_time = self.cosmology.t_from_z(self.final_redshift)
             self.final_time.units.registry = self.unit_registry
 
         # If not a cosmology simulation, figure out the stopping criteria.
@@ -516,11 +521,15 @@
                     'StopCycle' in self.parameters):
                 raise NoStoppingCondition(self.parameter_filename)
             if self.final_time is None:
-                mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.',
-                           self.parameter_filename)
+                mylog.warn(
+                    "Simulation %s has no stop time set, stopping condition " +
+                    "will be based only on cycles.",
+                    self.parameter_filename)
 
     def _set_parameter_defaults(self):
-        "Set some default parameters to avoid problems if they are not in the parameter file."
+        """
+        Set some default parameters to avoid problems if they are not in the parameter file.
+        """
 
         self.parameters['GlobalDir'] = self.directory
         self.parameters['DataDumpName'] = "data"
@@ -570,7 +579,9 @@
                 self.final_redshift = self.all_outputs[-1]['redshift']
 
     def _check_for_outputs(self, potential_outputs):
-        r"""Check a list of files to see if they are valid datasets."""
+        """
+        Check a list of files to see if they are valid datasets.
+        """
 
         only_on_root(mylog.info, "Checking %d potential outputs.", 
                      len(potential_outputs))
@@ -603,112 +614,10 @@
 
         return my_outputs
 
-    def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
-        r"""Get datasets at or near to given values.
-
-        Parameters
-        ----------
-        key: str
-            The key by which to retrieve outputs, usually 'time' or
-            'redshift'.
-        values: array_like
-            A list of values, given as floats.
-        tolerance : float
-            If not None, do not return a dataset unless the value is
-            within the tolerance value.  If None, simply return the
-            nearest dataset.
-            Default: None.
-        outputs : list
-            The list of outputs from which to choose.  If None,
-            self.all_outputs is used.
-            Default: None.
-
-        Examples
-        --------
-        >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
-
-        """
-
-        if not isinstance(values, np.ndarray):
-            values = ensure_list(values)
-        if outputs is None:
-            outputs = self.all_outputs
-        my_outputs = []
-        if not outputs:
-            return my_outputs
-        for value in values:
-            outputs.sort(key=lambda obj:np.abs(value - obj[key]))
-            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
-                    and outputs[0] not in my_outputs:
-                my_outputs.append(outputs[0])
-            else:
-                mylog.error("No dataset added for %s = %f.", key, value)
-
-        outputs.sort(key=lambda obj: obj['time'])
-        return my_outputs
-
-    def _get_outputs_by_redshift(self, redshifts, tolerance=None, outputs=None):
-        r"""Get datasets at or near to given redshifts.
-
-        Parameters
-        ----------
-        redshifts: array_like
-            A list of redshifts, given as floats.
-        tolerance : float
-            If not None, do not return a dataset unless the value is
-            within the tolerance value.  If None, simply return the
-            nearest dataset.
-            Default: None.
-        outputs : list
-            The list of outputs from which to choose.  If None,
-            self.all_outputs is used.
-            Default: None.
-
-        Examples
-        --------
-        >>> datasets = es.get_outputs_by_redshift([0, 1, 2], tolerance=0.1)
-
-        """
-
-        return self._get_outputs_by_key('redshift', redshifts, tolerance=tolerance,
-                                     outputs=outputs)
-
-    def _get_outputs_by_time(self, times, tolerance=None, outputs=None):
-        r"""Get datasets at or near to given times.
-
-        Parameters
-        ----------
-        times: tuple of type (float array, str)
-            A list of times for which outputs will be found and the units 
-            of those values.  For example, ([0, 1, 2, 3], "s").
-        tolerance : float
-            If not None, do not return a dataset unless the time is
-            within the tolerance value.  If None, simply return the
-            nearest dataset.
-            Default = None.
-        outputs : list
-            The list of outputs from which to choose.  If None,
-            self.all_outputs is used.
-            Default: None.
-
-        Examples
-        --------
-        >>> datasets = es.get_outputs_by_time([600, 500, 400], tolerance=10.)
-
-        """
-
-        if not isinstance(times, YTArray):
-            if isinstance(times, tuple) and len(times) == 2:
-                times = self.arr(*times)
-            else:
-                times = self.arr(times, "code_time")
-        times = times.in_units("s")
-        return self._get_outputs_by_key('time', times, tolerance=tolerance,
-                                        outputs=outputs)
-
     def _write_cosmology_outputs(self, filename, outputs, start_index,
                                  decimals=3):
-        r"""Write cosmology output parameters for a cosmology splice.
+        """
+        Write cosmology output parameters for a cosmology splice.
         """
 
         mylog.info("Writing redshift output list to %s.", filename)

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/gadget/api.py
--- a/yt/frontends/gadget/api.py
+++ b/yt/frontends/gadget/api.py
@@ -7,7 +7,7 @@
 """
 
 #-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
+# Copyright (c) 2014-2015, yt Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
@@ -23,4 +23,7 @@
     IOHandlerGadgetBinary, \
     IOHandlerGadgetHDF5
 
+from .simulation_handling import \
+    GadgetSimulation
+
 from . import tests

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -18,6 +18,7 @@
 import h5py
 import numpy as np
 import stat
+import struct
 import os
 import types
 
@@ -242,10 +243,59 @@
         self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
         self.time_unit = self.length_unit / self.velocity_unit
 
+    @staticmethod
+    def _validate_header(filename):
+        '''
+        This method automatically detects whether the Gadget file is big/little endian 
+        and is not corrupt/invalid using the first 4 bytes in the file.  It returns a 
+        tuple of (Valid, endianswap) where Valid is a boolean that is true if the file 
+        is a Gadget binary file, and endianswap is the endianness character '>' or '<'. 
+        '''
+        try:
+            f = open(filename,'rb')
+        except IOError:
+            try:
+                f = open(filename+".0", 'rb')
+            except IOError:
+                return False, 1
+        
+        # First int32 is 256 for a Gadget2 binary file with SnapFormat=1,
+        # 8 for a Gadget2 binary file with SnapFormat=2 file, 
+        # or the byte swapped equivalents (65536 and 134217728).
+        # The int32 following the header (first 4+256 bytes) must equal this
+        # number.
+        (rhead,) = struct.unpack('<I',f.read(4))
+        # Use value to check endianness
+        if rhead == 256:
+            endianswap = '<'
+        elif rhead == 65536:
+            endianswap = '>'
+        # Disabled for now (does anyone still use SnapFormat=2?)
+        # If so, alternate read would be needed based on header.
+        # elif rhead == 8:
+        #     return True, '<'
+        # elif rhead == 134217728:
+        #     return True, '>'
+        else:
+            f.close()
+            return False, 1
+        # Read in particle number from header
+        np0 = sum(struct.unpack(endianswap+'IIIIII',f.read(6*4)))
+        # Read in size of position block. It should be 4 bytes per float, 
+        # with 3 coordinates (x,y,z) per particle. (12 bytes per particle)
+        f.seek(4+256+4,0)
+        np1 = struct.unpack(endianswap+'I',f.read(4))[0]/(4*3)
+        f.close()
+        # Compare
+        if np0 == np1:
+            return True, endianswap
+        else:
+            return False, 1
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        # We do not allow load() of these files.
-        return False
+        # First 4 bytes used to check load
+        return GadgetDataset._validate_header(args[0])[0]
 
 class GadgetHDF5Dataset(GadgetDataset):
     _file_class = ParticleFile
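
The new _validate_header reads the leading record marker to pick an endianness and then cross-checks the particle count against the size of the position block. A self-contained sketch of that check against a fabricated SnapFormat=1 buffer (layout as in the comments above: 4-byte marker, 256-byte header, 4-byte marker, then the position-block size):

import struct
from io import BytesIO

# Fabricate a little-endian header: marker 256, six particle counts
# summing to 2, zero padding, trailing marker, then the position block
# size (2 particles * 3 floats * 4 bytes = 24).
buf = struct.pack('<I', 256)
buf += struct.pack('<6I', 2, 0, 0, 0, 0, 0) + b'\x00' * (256 - 24)
buf += struct.pack('<I', 256)
buf += struct.pack('<I', 24)

f = BytesIO(buf)
(rhead,) = struct.unpack('<I', f.read(4))
endianswap = '<' if rhead == 256 else '>'    # 65536 would mean byte-swapped
np0 = sum(struct.unpack(endianswap + '6I', f.read(24)))
f.seek(4 + 256 + 4, 0)                       # jump to the position block size
np1 = struct.unpack(endianswap + 'I', f.read(4))[0] // (4 * 3)
print(np0 == np1)                            # True for a consistent file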

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/gadget/simulation_handling.py
--- /dev/null
+++ b/yt/frontends/gadget/simulation_handling.py
@@ -0,0 +1,484 @@
+"""
+GadgetSimulation class and member functions.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013-2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import glob
+import os
+
+from yt.convenience import \
+    load, \
+    only_on_root
+from yt.data_objects.time_series import \
+    SimulationTimeSeries, DatasetSeries
+from yt.units import dimensions
+from yt.units.unit_registry import \
+    UnitRegistry
+from yt.units.yt_array import \
+    YTArray, YTQuantity
+from yt.utilities.cosmology import \
+    Cosmology
+from yt.utilities.exceptions import \
+    InvalidSimulationTimeSeries, \
+    MissingParameter, \
+    NoStoppingCondition, \
+    YTOutputNotIdentified
+from yt.utilities.logger import ytLogger as \
+    mylog
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_objects
+
+class GadgetSimulation(SimulationTimeSeries):
+    r"""
+    Initialize a Gadget Simulation object.
+
+    Upon creation, the parameter file is parsed and the time and redshift
+    are calculated and stored in all_outputs.  A time units dictionary is
+    instantiated to allow for time outputs to be requested with physical
+    time units.  The get_time_series can be used to generate a
+    DatasetSeries object.
+
+    parameter_filename : str
+        The simulation parameter file.
+    find_outputs : bool
+        If True, the OutputDir directory is searched for datasets.  
+        Time and redshift information are gathered by temporarily 
+        instantiating each dataset.  This can be used when simulation 
+        data was created in a non-standard way, making it difficult 
+        to guess the corresponding time and redshift information.
+        Default: False.
+
+    Examples
+    --------
+    >>> import yt
+    >>> gs = yt.simulation("my_simulation.par", "Gadget")
+    >>> gs.get_time_series()
+    >>> for ds in gs:
+    ...     print ds.current_time
+
+    """
+
+    def __init__(self, parameter_filename, find_outputs=False):
+        self.simulation_type = "particle"
+        self.dimensionality = 3
+        SimulationTimeSeries.__init__(self, parameter_filename,
+                                      find_outputs=find_outputs)
+
+    def _set_units(self):
+        self.unit_registry = UnitRegistry()
+        self.time_unit = self.quan(1.0, "s")
+        if self.cosmological_simulation:
+            # Instantiate Cosmology object for units and time conversions.
+            self.cosmology = \
+              Cosmology(hubble_constant=self.hubble_constant,
+                        omega_matter=self.omega_matter,
+                        omega_lambda=self.omega_lambda,
+                        unit_registry=self.unit_registry)
+            self.unit_registry.modify("h", self.hubble_constant)
+            # Comoving lengths
+            for my_unit in ["m", "pc", "AU", "au"]:
+                new_unit = "%scm" % my_unit
+                # technically not true, but should be ok
+                self.unit_registry.add(
+                    new_unit, self.unit_registry.lut[my_unit][0],
+                    dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
+            self.length_unit = self.quan(self.unit_base["UnitLength_in_cm"],
+                                         "cmcm / h", registry=self.unit_registry)
+            self.box_size *= self.length_unit.in_units("Mpccm / h")
+        else:
+            # Read time from file for non-cosmological sim
+            self.time_unit = self.quan(
+                self.unit_base["UnitLength_in_cm"]/ \
+                    self.unit_base["UnitVelocity_in_cm_per_s"], "s")
+            self.unit_registry.add("code_time", 1.0, dimensions.time)
+            self.unit_registry.modify("code_time", self.time_unit)
+            # Length
+            self.length_unit = self.quan(
+                self.unit_base["UnitLength_in_cm"],"cm")
+            self.unit_registry.add("code_length", 1.0, dimensions.length)
+            self.unit_registry.modify("code_length", self.length_unit)
+
+    def get_time_series(self, initial_time=None, final_time=None,
+                        initial_redshift=None, final_redshift=None,
+                        times=None, redshifts=None, tolerance=None,
+                        parallel=True, setup_function=None):
+
+        """
+        Instantiate a DatasetSeries object for a set of outputs.
+
+        If no additional keywords given, a DatasetSeries object will be
+        created with all potential datasets created by the simulation.
+
+        Outputs can be gathered by specifying a time or redshift range
+        (or combination of time and redshift), with a specific list of
+        times or redshifts, or by simply searching all subdirectories
+        within the simulation directory.
+
+        initial_time : tuple of type (float, str)
+            The earliest time for outputs to be included.  This should be 
+            given as the value and the string representation of the units.
+            For example, (5.0, "Gyr").  If None, the initial time of the 
+            simulation is used.  This can be used in combination with 
+            either final_time or final_redshift.
+            Default: None.
+        final_time : tuple of type (float, str)
+            The latest time for outputs to be included.  This should be 
+            given as the value and the string representation of the units.
+            For example, (13.7, "Gyr"). If None, the final time of the 
+            simulation is used.  This can be used in combination with either 
+            initial_time or initial_redshift.
+            Default: None.
+        times : tuple of type (float array, str)
+            A list of times for which outputs will be found and the units 
+            of those values.  For example, ([0, 1, 2, 3], "s").
+            Default: None.
+        initial_redshift : float
+            The earliest redshift for outputs to be included.  If None,
+            the initial redshift of the simulation is used.  This can be
+            used in combination with either final_time or
+            final_redshift.
+            Default: None.
+        final_redshift : float
+            The latest redshift for outputs to be included.  If None,
+            the final redshift of the simulation is used.  This can be
+            used in combination with either initial_time or
+            initial_redshift.
+            Default: None.
+        redshifts : array_like
+            A list of redshifts for which outputs will be found.
+            Default: None.
+        tolerance : float
+            Used in combination with "times" or "redshifts" keywords,
+            this is the tolerance within which outputs are accepted
+            given the requested times or redshifts.  If None, the
+            nearest output is always taken.
+            Default: None.
+        parallel : bool/int
+            If True, the generated DatasetSeries will divide the work
+            such that a single processor works on each dataset.  If an
+            integer is supplied, the work will be divided into that
+            number of jobs.
+            Default: True.
+        setup_function : callable, accepts a ds
+            This function will be called whenever a dataset is loaded.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> gs = yt.simulation("my_simulation.par", "Gadget")
+        
+        >>> gs.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"))
+
+        >>> gs.get_time_series(redshifts=[3, 2, 1, 0])
+
+        >>> # after calling get_time_series
+        >>> for ds in gs.piter():
+        ...     p = ProjectionPlot(ds, "x", "density")
+        ...     p.save()
+
+        >>> # An example using the setup_function keyword
+        >>> def print_time(ds):
+        ...     print ds.current_time
+        >>> gs.get_time_series(setup_function=print_time)
+        >>> for ds in gs:
+        ...     SlicePlot(ds, "x", "Density").save()
+
+        """
+
+        if (initial_redshift is not None or \
+            final_redshift is not None) and \
+            not self.cosmological_simulation:
+            raise InvalidSimulationTimeSeries(
+                "An initial or final redshift has been given for a " +
+                "noncosmological simulation.")
+
+        my_all_outputs = self.all_outputs
+        if not my_all_outputs:
+            DatasetSeries.__init__(self, outputs=[], parallel=parallel,
+                                   unit_base=self.unit_base)
+            mylog.info("0 outputs loaded into time series.")
+            return
+
+        # Apply selection criteria to the set.
+        if times is not None:
+            my_outputs = self._get_outputs_by_key("time", times,
+                                                  tolerance=tolerance,
+                                                  outputs=my_all_outputs)
+
+        elif redshifts is not None:
+            my_outputs = self._get_outputs_by_key("redshift",
+                                                  redshifts, tolerance=tolerance,
+                                                  outputs=my_all_outputs)
+
+        else:
+            if initial_time is not None:
+                if isinstance(initial_time, float):
+                    initial_time = self.quan(initial_time, "code_time")
+                elif isinstance(initial_time, tuple) and len(initial_time) == 2:
+                    initial_time = self.quan(*initial_time)
+                elif not isinstance(initial_time, YTArray):
+                    raise RuntimeError(
+                        "Error: initial_time must be given as a float or " +
+                        "tuple of (value, units).")
+            elif initial_redshift is not None:
+                my_initial_time = self.cosmology.t_from_z(initial_redshift)
+            else:
+                my_initial_time = self.initial_time
+
+            if final_time is not None:
+                if isinstance(final_time, float):
+                    final_time = self.quan(final_time, "code_time")
+                elif isinstance(final_time, tuple) and len(final_time) == 2:
+                    final_time = self.quan(*final_time)
+                elif not isinstance(final_time, YTArray):
+                    raise RuntimeError(
+                        "Error: final_time must be given as a float or " +
+                        "tuple of (value, units).")
+                my_final_time = final_time.in_units("s")
+            elif final_redshift is not None:
+                my_final_time = self.cosmology.t_from_z(final_redshift)
+            else:
+                my_final_time = self.final_time
+
+            my_initial_time.convert_to_units("s")
+            my_final_time.convert_to_units("s")
+            my_times = np.array([a["time"] for a in my_all_outputs])
+            my_indices = np.digitize([my_initial_time, my_final_time], my_times)
+            if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
+            my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
+
+        init_outputs = []
+        for output in my_outputs:
+            if os.path.exists(output["filename"]):
+                init_outputs.append(output["filename"])
+        if len(init_outputs) == 0 and len(my_outputs) > 0:
+            mylog.warn("Could not find any datasets.  " +
+                       "Check the value of OutputDir in your parameter file.")
+            
+        DatasetSeries.__init__(self, outputs=init_outputs, parallel=parallel,
+                                setup_function=setup_function,
+                                unit_base=self.unit_base)
+        mylog.info("%d outputs loaded into time series.", len(init_outputs))
+
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+
+        self.unit_base = {}
+
+        # Let's read the file
+        lines = open(self.parameter_filename).readlines()
+        comments = ["%", ";"]
+        for line in (l.strip() for l in lines):
+            for comment in comments:
+                if comment in line: line = line[0:line.find(comment)]
+            if len(line) < 2: continue
+            param, vals = (i.strip() for i in line.split(None, 1))
+            # First we try to decipher what type of value it is.
+            vals = vals.split()
+            # Special case approaching.
+            if "(do" in vals: vals = vals[:1]
+            if len(vals) == 0:
+                pcast = str # Assume NULL output
+            else:
+                v = vals[0]
+                # Figure out if it's castable to floating point:
+                try:
+                    float(v)
+                except ValueError:
+                    pcast = str
+                else:
+                    if any("." in v or "e" in v for v in vals):
+                        pcast = float
+                    elif v == "inf":
+                        pcast = str
+                    else:
+                        pcast = int
+            # Now we figure out what to do with it.
+            if param.startswith("Unit"):
+                self.unit_base[param] = float(vals[0])
+            if len(vals) == 0:
+                vals = ""
+            elif len(vals) == 1:
+                vals = pcast(vals[0])
+            else:
+                vals = np.array([pcast(i) for i in vals])
+
+            self.parameters[param] = vals
+
+        if self.parameters["ComovingIntegrationOn"]:
+            cosmo_attr = {"box_size": "BoxSize",
+                          "omega_lambda": "OmegaLambda",
+                          "omega_matter": "Omega0",
+                          "hubble_constant": "HubbleParam"}
+            self.initial_redshift = 1.0 / self.parameters["TimeBegin"] - 1.0
+            self.final_redshift = 1.0 / self.parameters["TimeMax"] - 1.0
+            self.cosmological_simulation = 1
+            for a, v in cosmo_attr.items():
+                if not v in self.parameters:
+                    raise MissingParameter(self.parameter_filename, v)
+                setattr(self, a, self.parameters[v])
+        else:
+            self.cosmological_simulation = 0
+            self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = 0.0
+
+    def _snapshot_format(self, index=None):
+        """
+        The snapshot filename for a given index.  Modify this for different 
+        naming conventions.
+        """
+
+        if self.parameters["OutputDir"].startswith("/"):
+            data_dir = self.parameters["OutputDir"]
+        else:
+            data_dir = os.path.join(self.directory,
+                                    self.parameters["OutputDir"])
+        if self.parameters["NumFilesPerSnapshot"] > 1:
+            suffix = ".0"
+        else:
+            suffix = ""
+        if self.parameters["SnapFormat"] == 3:
+            suffix += ".hdf5"
+        if index is None:
+            count = "*"
+        else:
+            count = "%03d" % index
+        filename = "%s_%s%s" % (self.parameters["SnapshotFileBase"],
+                                count, suffix)
+        return os.path.join(data_dir, filename)
+                
+    def _get_all_outputs(self, find_outputs=False):
+        """
+        Get all potential datasets and combine into a time-sorted list.
+        """
+
+        # Create the set of outputs from which further selection will be done.
+        if find_outputs:
+            self._find_outputs()
+        else:
+            if self.parameters["OutputListOn"]:
+                a_values = [float(a) for a in 
+                           open(self.parameters["OutputListFilename"], "r").readlines()]
+            else:
+                a_values = [float(self.parameters["TimeOfFirstSnapshot"])]
+                time_max = float(self.parameters["TimeMax"])
+                while a_values[-1] < time_max:
+                    if self.cosmological_simulation:
+                        a_values.append(
+                            a_values[-1] * self.parameters["TimeBetSnapshot"])
+                    else:
+                        a_values.append(
+                            a_values[-1] + self.parameters["TimeBetSnapshot"])
+                if a_values[-1] > time_max:
+                    a_values[-1] = time_max
+
+            if self.cosmological_simulation:
+                self.all_outputs = \
+                  [{"filename": self._snapshot_format(i),
+                    "redshift": (1. / a - 1)}
+                   for i, a in enumerate(a_values)]
+                
+                # Calculate times for redshift outputs.
+                for output in self.all_outputs:
+                    output["time"] = self.cosmology.t_from_z(output["redshift"])
+            else:
+                self.all_outputs = \
+                  [{"filename": self._snapshot_format(i),
+                    "time": self.quan(a, "code_time")}
+                   for i, a in enumerate(a_values)]
+
+            self.all_outputs.sort(key=lambda obj:obj["time"].to_ndarray())
+
+    def _calculate_simulation_bounds(self):
+        """
+        Figure out the starting and stopping time and redshift for the simulation.
+        """
+
+        # Convert initial/final redshifts to times.
+        if self.cosmological_simulation:
+            self.initial_time = self.cosmology.t_from_z(self.initial_redshift)
+            self.initial_time.units.registry = self.unit_registry
+            self.final_time = self.cosmology.t_from_z(self.final_redshift)
+            self.final_time.units.registry = self.unit_registry
+
+        # If not a cosmology simulation, figure out the stopping criteria.
+        else:
+            if "TimeBegin" in self.parameters:
+                self.initial_time = self.quan(self.parameters["TimeBegin"], "code_time")
+            else:
+                self.initial_time = self.quan(0., "code_time")
+
+            if "TimeMax" in self.parameters:
+                self.final_time = self.quan(self.parameters["TimeMax"], "code_time")
+            else:
+                self.final_time = None
+            if not "TimeMax" in self.parameters:
+                raise NoStoppingCondition(self.parameter_filename)
+
+    def _find_outputs(self):
+        """
+        Search for directories matching the data dump keywords.
+        If found, get dataset times by opening the ds.
+        """
+
+        potential_outputs = glob.glob(self._snapshot_format())
+        self.all_outputs = self._check_for_outputs(potential_outputs)
+        self.all_outputs.sort(key=lambda obj: obj["time"])
+        only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))
+
+        # manually set final time and redshift with last output
+        if self.all_outputs:
+            self.final_time = self.all_outputs[-1]["time"]
+            if self.cosmological_simulation:
+                self.final_redshift = self.all_outputs[-1]["redshift"]
+
+    def _check_for_outputs(self, potential_outputs):
+        r"""
+        Check a list of files to see if they are valid datasets.
+        """
+
+        only_on_root(mylog.info, "Checking %d potential outputs.", 
+                     len(potential_outputs))
+
+        my_outputs = {}
+        for my_storage, output in parallel_objects(potential_outputs, 
+                                                   storage=my_outputs):
+            if os.path.exists(output):
+                try:
+                    ds = load(output)
+                    if ds is not None:
+                        my_storage.result = {"filename": output,
+                                             "time": ds.current_time.in_units("s")}
+                        if ds.cosmological_simulation:
+                            my_storage.result["redshift"] = ds.current_redshift
+                except YTOutputNotIdentified:
+                    mylog.error("Failed to load %s", output)
+        my_outputs = [my_output for my_output in my_outputs.values() \
+                      if my_output is not None]
+        return my_outputs
+
+    def _write_cosmology_outputs(self, filename, outputs, start_index,
+                                 decimals=3):
+        r"""
+        Write cosmology output parameters for a cosmology splice.
+        """
+
+        mylog.info("Writing redshift output list to %s.", filename)
+        f = open(filename, "w")
+        for output in outputs:
+            f.write("%f\n" % (1. / (1. + output["redshift"])))
+        f.close()
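
In _get_all_outputs above, TimeBetSnapshot acts as a multiplicative factor on the scale factor for cosmological runs and as an additive interval otherwise. A short sketch of that progression with hypothetical parameter values:

# Hypothetical values standing in for TimeOfFirstSnapshot, TimeBetSnapshot
# and TimeMax.
time_of_first, time_bet, time_max = 0.1, 1.2589, 1.0
cosmological = True

a_values = [time_of_first]
while a_values[-1] < time_max:
    if cosmological:
        a_values.append(a_values[-1] * time_bet)   # factor on scale factor a
    else:
        a_values.append(a_values[-1] + time_bet)   # interval in code time
if a_values[-1] > time_max:
    a_values[-1] = time_max

# For a cosmological run, scale factors map to redshifts via z = 1/a - 1.
print([round(1.0 / a - 1.0, 2) for a in a_values])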

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/owls/api.py
--- a/yt/frontends/owls/api.py
+++ b/yt/frontends/owls/api.py
@@ -22,5 +22,8 @@
 
 from .io import \
     IOHandlerOWLS
+
+from .simulation_handling import \
+    OWLSSimulation
     
 from . import tests

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/owls/simulation_handling.py
--- /dev/null
+++ b/yt/frontends/owls/simulation_handling.py
@@ -0,0 +1,78 @@
+"""
+OWLSSimulation class and member functions.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013-2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+
+from yt.frontends.gadget.simulation_handling import \
+    GadgetSimulation
+
+class OWLSSimulation(GadgetSimulation):
+    r"""
+    Initialize an OWLS Simulation object.
+
+    Upon creation, the parameter file is parsed and the time and redshift
+    are calculated and stored in all_outputs.  A time units dictionary is
+    instantiated to allow for time outputs to be requested with physical
+    time units.  The get_time_series can be used to generate a
+    DatasetSeries object.
+
+    parameter_filename : str
+        The simulation parameter file.
+    find_outputs : bool
+        If True, the OutputDir directory is searched for datasets.  
+        Time and redshift information are gathered by temporarily 
+        instantiating each dataset.  This can be used when simulation 
+        data was created in a non-standard way, making it difficult 
+        to guess the corresponding time and redshift information.
+        Default: False.
+
+    Examples
+    --------
+    >>> import yt
+    >>> es = yt.simulation("my_simulation.par", "OWLS")
+    >>> es.get_time_series()
+    >>> for ds in es:
+    ...     print ds.current_time
+
+    """
+
+    def __init__(self, parameter_filename, find_outputs=False):
+        GadgetSimulation.__init__(self, parameter_filename,
+                                  find_outputs=find_outputs)
+
+    def _snapshot_format(self, index=None):
+        """
+        The snapshot filename for a given index.  Modify this for different 
+        naming conventions.
+        """
+
+        if self.parameters["OutputDir"].startswith("/"):
+            data_dir = self.parameters["OutputDir"]
+        else:
+            data_dir = os.path.join(self.directory,
+                                    self.parameters["OutputDir"])
+        if self.parameters["NumFilesPerSnapshot"] > 1:
+            suffix = ".0"
+        else:
+            suffix = ""
+        if self.parameters["SnapFormat"] == 3:
+            suffix += ".hdf5"
+        if index is None:
+            count = "*"
+        else:
+            count = "%03d" % index
+        keyword = "%s_%s" % (self.parameters["SnapshotFileBase"], count)
+        filename = os.path.join(keyword, "%s%s" % (keyword, suffix))
+        return os.path.join(data_dir, filename)
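
The OWLS override changes only the path layout: each snapshot lives in a subdirectory named after the snapshot itself. A quick sketch with hypothetical names:

import os

base, index, suffix = "snap", 5, ".0.hdf5"     # hypothetical values
keyword = "%s_%03d" % (base, index)
print(os.path.join("output", keyword + suffix))            # Gadget layout
print(os.path.join("output", keyword, keyword + suffix))   # OWLS layout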

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/sdf/tests/test_outputs.py
--- a/yt/frontends/sdf/tests/test_outputs.py
+++ b/yt/frontends/sdf/tests/test_outputs.py
@@ -11,33 +11,35 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
 import numpy as np
+import socket
+from yt.testing import assert_equal
 from yt.frontends.sdf.api import SDFDataset
 from yt.visualization.api import ProjectionPlot
 from yt.testing import requires_module
+from yt.extern.six.moves import urllib
 
-_fields = (('deposit','all_cic'))
 
-import urllib2
-import socket
+_fields = (('deposit', 'all_cic'))
+scivis_data = "http://darksky.slac.stanford.edu/scivis2015/data/ds14_scivis_0128/ds14_scivis_0128_e4_dt04_1.0000"
 
-scivis_data = "http://darksky.slac.stanford.edu/scivis2015/data/ds14_scivis_0128/ds14_scivis_0128_e4_dt04_1.0000"
 
 # Answer on http://stackoverflow.com/questions/3764291/checking-network-connection
 # Better answer on http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
 def internet_on():
     try:
-        urllib2.urlopen(scivis_data, timeout = 1)
+        urllib.request.urlopen(scivis_data, timeout=1)
         return True
-    except urllib2.URLError:
+    except urllib.error.URLError:
         return False
     except socket.timeout:
         return False
 
+
 @requires_module('thingking')
 def test_scivis():
-    if not internet_on(): return
+    if not internet_on():
+        return
     ds = SDFDataset(scivis_data)
     yield assert_equal, str(ds), "ds14_scivis_0128_e4_dt04_1.0000"
     ad = ds.all_data()

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -68,8 +68,11 @@
                  parameter_file=None,
                  cosmology_parameters=None,
                  n_ref=64, over_refine_factor=1,
-                 bounding_box = None,
+                 bounding_box=None,
                  units_override=None):
+        # Because Tipsy outputs don't have a fixed domain boundary, one can
+        # specify a bounding box which effectively gives a domain_left_edge 
+        # and domain_right_edge
         self.bounding_box = bounding_box
         self.filter_bbox = (bounding_box is not None)
         self.n_ref = n_ref
@@ -106,14 +109,6 @@
                                "Use unit_base instead.")
         super(TipsyDataset, self).__init__(filename, dataset_type)
 
-        if bounding_box is not None:
-            bbox = self.arr(bounding_box, 'code_length', dtype="float64")
-            if bbox.shape == (2, 3):
-                bbox = bbox.transpose()
-            self.domain_left_edge = bbox[:,0]
-            self.domain_right_edge = bbox[:,1]
-
-
     def __repr__(self):
         return os.path.basename(self.parameter_filename)
 
@@ -176,13 +171,21 @@
         self.periodicity = (periodic, periodic, periodic)
         if comoving and period is None:
             period = 1.0
-        if periodic and period is not None:
-            # If we are periodic, that sets our domain width to either 1 or dPeriod.
-            self.domain_left_edge = np.zeros(3, "float64") - 0.5*period
-            self.domain_right_edge = np.zeros(3, "float64") + 0.5*period
-        else:
-            self.domain_left_edge = None
-            self.domain_right_edge = None
+        if self.bounding_box is None:
+            if periodic and period is not None:
+                # If we are periodic, that sets our domain width to either 1 or dPeriod.
+                self.domain_left_edge = np.zeros(3, "float64") - 0.5*period
+                self.domain_right_edge = np.zeros(3, "float64") + 0.5*period
+            else:
+                self.domain_left_edge = None
+                self.domain_right_edge = None
+        else: 
+            bbox = self.arr(self.bounding_box, 'code_length', dtype="float64")
+            if bbox.shape == (2, 3):
+                bbox = bbox.transpose()
+            self.domain_left_edge = bbox[:,0]
+            self.domain_right_edge = bbox[:,1]
+
         if comoving:
             cosm = self._cosmology_parameters or {}
             self.scale_factor = hvals["time"]#In comoving simulations, time stores the scale factor a
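
The net effect of moving the bounding-box handling into the parameter-parsing path is that a user-supplied box now feeds the domain edges directly, with the periodic-domain logic applying only when no box is given. A hedged usage sketch (the snapshot path and box values are illustrative only):

    import yt

    # One [min, max] pair per axis, in code_length; a (2, 3) array would
    # be transposed automatically by the logic above.
    bbox = [[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]]
    ds = yt.load("TipsyGalaxy/galaxy.00300", bounding_box=bbox)
    print(ds.domain_left_edge, ds.domain_right_edge)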

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -15,14 +15,22 @@
 
 import h5py
 
+from distutils.version import LooseVersion
+
 class HDF5FileHandler(object):
     handle = None
+
     def __init__(self, filename):
         self.handle = h5py.File(filename, 'r')
 
     def __del__(self):
         if self.handle is not None:
-            self.handle.close()
+            # In h5py 2.4 and newer file handles are closed automatically.
+            # so only close the handle on old versions.  This works around an
+            # issue in h5py when run under python 3.4.
+            # See https://github.com/h5py/h5py/issues/534
+            if LooseVersion(h5py.__version__) < '2.4.0':
+                self.handle.close()
 
     def __getitem__(self, key):
         return self.handle[key]
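
The guard works because LooseVersion compares dotted version strings component by component, where a plain string comparison would misorder multi-digit components. A quick illustration:

    from distutils.version import LooseVersion

    print("2.10.0" > "2.4.0")                              # False: lexicographic
    print(LooseVersion("2.10.0") > LooseVersion("2.4.0"))  # True: numeric, per component
    print(LooseVersion("2.3.1") < "2.4.0")                 # True: bare strings are coerced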

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/utilities/hierarchy_inspection.py
--- a/yt/utilities/hierarchy_inspection.py
+++ b/yt/utilities/hierarchy_inspection.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 import inspect
 from collections import Counter
+from yt.extern.six.moves import reduce
 
 
 def find_lowest_subclasses(candidates):
@@ -31,6 +32,6 @@
 
     counters = [Counter(mro) for mro in mros]
 
-    count = reduce(lambda x, y: x+y, counters)
+    count = reduce(lambda x, y: x + y, counters)
 
     return [x for x in count.keys() if count[x] == 1]
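
The new import is needed because reduce is no longer a builtin on Python 3; six.moves.reduce resolves to the builtin on 2 and functools.reduce on 3. A small self-contained illustration of the Counter-summing idiom used in find_lowest_subclasses:

    from collections import Counter
    from functools import reduce  # what six.moves.reduce resolves to on Python 3

    counters = [Counter("abc"), Counter("abd")]
    count = reduce(lambda x, y: x + y, counters)
    print(sorted(k for k in count if count[k] == 1))  # ['c', 'd']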

diff -r 9b6a853e5cd1a2dc01b03fdd0eab8ca7cf2dce30 -r c85e81b8cf95bd11f366d476128753b34c03adfd yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -26,9 +26,6 @@
 
         Parameters
         ----------
-        center        : array_like
-           The current "center" of the view port -- the normal_vector connects
-           the center and the origin
         normal_vector : array_like
            A vector normal to the image plane
         north_vector  : array_like, optional


https://bitbucket.org/yt_analysis/yt/commits/f8a479a6a508/
Changeset:   f8a479a6a508
Branch:      yt
User:        cosmosquark
Date:        2015-05-29 19:05:37+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  6 files

diff -r c85e81b8cf95bd11f366d476128753b34c03adfd -r f8a479a6a508f9aedf39f1868f3112918f0fed95 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -116,7 +116,7 @@
   the width of the smallest grid element in the simulation from the
   last data snapshot (i.e. the one where time has evolved the
   longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
 * ``total_particles``, if supplied, this is a pre-calculated
   total number of dark matter
   particles present in the simulation. For example, this is useful

diff -r c85e81b8cf95bd11f366d476128753b34c03adfd -r f8a479a6a508f9aedf39f1868f3112918f0fed95 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -56,7 +56,7 @@
 
    import yt
    ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
 This object, ``surface``, can be queried for values on the surface.  For
@@ -172,7 +172,7 @@
    trans = [1.0, 0.5]
    filename = './surfaces'
 
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    for i,r in enumerate(rho):
        surf = ds.surface(sphere, 'density', r)
        surf.export_obj(filename, transparency = trans[i], color_field='temperature', plot_index = i)
@@ -248,7 +248,7 @@
        return (data['density']*data['density']*np.sqrt(data['temperature']))
    add_field("emissivity", function=_Emissivity, units=r"g*K/cm**6")
 
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    for i,r in enumerate(rho):
        surf = ds.surface(sphere, 'density', r)
        surf.export_obj(filename, transparency = trans[i],

diff -r c85e81b8cf95bd11f366d476128753b34c03adfd -r f8a479a6a508f9aedf39f1868f3112918f0fed95 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1232,9 +1232,8 @@
         fglob = path.join(basedir, 'halos_%d.*.bin' % n)
         files = glob.glob(fglob)
         halos = self._get_halos_binary(files)
-        #Jc = mass_sun_cgs/ ds['mpchcm'] * 1e5
         Jc = 1.0
-        length = 1.0 / ds['mpchcm']
+        length = 1.0 / ds['Mpchcm']
         conv = dict(pos = np.array([length, length, length,
                                     1, 1, 1]), # to unitary
                     r=1.0/ds['kpchcm'], # to unitary

diff -r c85e81b8cf95bd11f366d476128753b34c03adfd -r f8a479a6a508f9aedf39f1868f3112918f0fed95 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -729,7 +729,7 @@
 
     >>> import yt
     >>> ds = yt.load("RedshiftOutput0005")
-    >>> sp = ds.sphere("max", (1.0, 'mpc'))
+    >>> sp = ds.sphere("max", (1.0, 'Mpc'))
     >>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"])
     """
     _type_name = "cut_region"

diff -r c85e81b8cf95bd11f366d476128753b34c03adfd -r f8a479a6a508f9aedf39f1868f3112918f0fed95 yt/fields/field_aliases.py
--- a/yt/fields/field_aliases.py
+++ b/yt/fields/field_aliases.py
@@ -141,12 +141,12 @@
     ("CellMassCode",                          "code_mass"),
     ("TotalMassMsun",                         "msun"),
     ("CellVolumeCode",                        "code_length"),
-    ("CellVolumeMpc",                         "mpc**3"),
-    ("ParticleSpecificAngularMomentumXKMSMPC","km/s/mpc"),
-    ("ParticleSpecificAngularMomentumYKMSMPC","km/s/mpc"),
-    ("ParticleSpecificAngularMomentumZKMSMPC","km/s/mpc"),
-    ("RadiusMpc",                             "mpc"),
-    ("ParticleRadiusMpc",                     "mpc"),
+    ("CellVolumeMpc",                         "Mpc**3"),
+    ("ParticleSpecificAngularMomentumXKMSMPC","km/s/Mpc"),
+    ("ParticleSpecificAngularMomentumYKMSMPC","km/s/Mpc"),
+    ("ParticleSpecificAngularMomentumZKMSMPC","km/s/Mpc"),
+    ("RadiusMpc",                             "Mpc"),
+    ("ParticleRadiusMpc",                     "Mpc"),
     ("ParticleRadiuskpc",                     "kpc"),
     ("Radiuskpc",                             "kpc"),
     ("ParticleRadiuskpch",                    "kpc"),

diff -r c85e81b8cf95bd11f366d476128753b34c03adfd -r f8a479a6a508f9aedf39f1868f3112918f0fed95 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -890,7 +890,7 @@
         elif self.dimensionality == 2:
             self._setup_2d()
 
-    def set_code_units(self):
+    def _set_code_unit_attributes(self):
         if self.cosmological_simulation:
             k = self.cosmology_get_units()
             # Now some CGS values
@@ -928,17 +928,6 @@
         magnetic_unit = np.float64(magnetic_unit.in_cgs())
         self.magnetic_unit = self.quan(magnetic_unit, "gauss")
 
-        self._override_code_units()
-
-        self.unit_registry.modify("code_magnetic", self.magnetic_unit)
-        self.unit_registry.modify("code_length", self.length_unit)
-        self.unit_registry.modify("code_mass", self.mass_unit)
-        self.unit_registry.modify("code_time", self.time_unit)
-        self.unit_registry.modify("code_velocity", self.velocity_unit)
-        DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
-                               DW.units.dimensions)
-
     def cosmology_get_units(self):
         """
         Return an Enzo-fortran style dictionary of units to feed into custom
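
Renaming set_code_units to _set_code_unit_attributes moves Enzo onto the standard Dataset hook: the base-class set_code_units takes over the override handling and unit-registry bookkeeping removed here, and the frontend only assigns the unit attributes. A schematic sketch of the pattern (hypothetical frontend, not Enzo's actual values):

    from yt.data_objects.static_output import Dataset

    class MyDataset(Dataset):
        def _set_code_unit_attributes(self):
            # Assign the attributes only; the base class registers
            # code_length, code_mass, etc. and applies units_override.
            self.length_unit = self.quan(1.0, "cm")
            self.mass_unit = self.quan(1.0, "g")
            self.time_unit = self.quan(1.0, "s")
            self.velocity_unit = self.quan(1.0, "cm/s")
            self.magnetic_unit = self.quan(1.0, "gauss")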


https://bitbucket.org/yt_analysis/yt/commits/bfddbd5594cb/
Changeset:   bfddbd5594cb
Branch:      yt
User:        Ben Thompson
Date:        2015-05-29 19:09:33+00:00
Summary:     merging
Affected #:  2 files

diff -r f8a479a6a508f9aedf39f1868f3112918f0fed95 -r bfddbd5594cbb18c563c5b50c9796e703f202b40 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1225,6 +1225,13 @@
     return v
 
 def ucross(arr1,arr2):
+   """Applies the cross product to two YT arrays.
+
+   This wrapper around numpy.cross preserves units.  
+   See the documentation of numpy.cross for full
+   details.
+   """
+
    v = np.cross(arr1,arr2)
    units = arr1.units * arr2.units
    arr = YTArray(v,units)
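
Since the result's units are the product of the inputs' units, crossing positions with velocities yields specific-angular-momentum units automatically. A short sketch of the documented behavior:

    import numpy as np
    from yt.units.yt_array import YTArray, ucross

    r = YTArray(np.random.random((5, 3)), "cm")
    v = YTArray(np.random.random((5, 3)), "cm/s")
    print(ucross(r, v).units)  # cm**2/s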

diff -r f8a479a6a508f9aedf39f1868f3112918f0fed95 -r bfddbd5594cbb18c563c5b50c9796e703f202b40 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -301,33 +301,36 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
-
     # First translate the positions to center of mass reference frame.
     if P is not None:
         P = P - CoM
 
-    if (L == np.array([0, 0, 1.])).all():
-        # Whew! No rotation to do!
-        if V is None:
-            return L, P
-        elif P is None:
-            return L, V
-        else:
-            return L, P, V
+    # is the L vector pointing in the Z direction?
+    if (L[0:2] == 0.0).all():
+        # yes it is, now lets check if we are just changing the direction
+        # of the z axis or not
+       if L[2] < 0.0:
+          # Just a simple flip of axis to do!
+          if P is not None:
+             P = -P
+          if V is not None:
+             V = -V
 
-    if (L == np.array([0, 0, -1.])).all():
-        # Just a simple flip of axis to do!
-        if P is not None:
-            P = -P
-        if V is not None:
-            V = -V
+          if V is None:
+             return L, P
+          elif P is None:
+             return L, V
+          else:
+              return L, P, V
 
-        if V is None:
-            return L, P
-        elif P is None:
-            return L, V
-        else:
-            return L, P, V
+       else:
+          # Whew! No rotation to do!
+          if V is None:
+             return L, P
+          elif P is None:
+             return L, V
+          else:
+             return L, P, V
 
     # Normal vector is not aligned with simulation Z axis
     # Therefore we are going to have to apply a rotation
@@ -908,7 +911,6 @@
     tile_shape = [1] + list(coords.shape)[1:]
     J = np.tile(res_normal, tile_shape)
 
-    # returns the absolute value
     return np.sum(J*coords, axis=0)  
 
 def get_cyl_theta(coords, normal):
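
Dropping the "# returns the absolute value" comment is itself a small correctness fix: np.sum(J*coords, axis=0) is a signed projection onto the normal, not an absolute value. For instance:

    import numpy as np

    normal = np.array([0.0, 0.0, 1.0])
    coord = np.array([1.0, 2.0, -3.0])
    print(np.sum(normal * coord))  # -3.0: the sign is preserved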


https://bitbucket.org/yt_analysis/yt/commits/b11f408fefc6/
Changeset:   b11f408fefc6
Branch:      yt
User:        Ben Thompson
Date:        2015-05-29 19:17:56+00:00
Summary:     being PEP8 compliant
Affected #:  3 files

diff -r bfddbd5594cbb18c563c5b50c9796e703f202b40 -r b11f408fefc6c6b8d353b9cd59d941370f18521c yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -245,9 +245,9 @@
 
 def get_angular_momentum_components(ptype, data, spos, svel):
     if data.has_field_parameter("normal"):
-       normal = data.get_field_parameter("normal")
+        normal = data.get_field_parameter("normal")
     else:
-       normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
+        normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
     bv = data.get_field_parameter("bulk_velocity")
     pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
     vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T

diff -r bfddbd5594cbb18c563c5b50c9796e703f202b40 -r b11f408fefc6c6b8d353b9cd59d941370f18521c yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1225,17 +1225,17 @@
     return v
 
 def ucross(arr1,arr2):
-   """Applies the cross product to two YT arrays.
+    """Applies the cross product to two YT arrays.
 
-   This wrapper around numpy.cross preserves units.  
-   See the documentation of numpy.cross for full
-   details.
-   """
+    This wrapper around numpy.cross preserves units.  
+    See the documentation of numpy.cross for full
+    details.
+    """
 
-   v = np.cross(arr1,arr2)
-   units = arr1.units * arr2.units
-   arr = YTArray(v,units)
-   return arr
+    v = np.cross(arr1,arr2)
+    units = arr1.units * arr2.units
+    arr = YTArray(v,units)
+    return arr
 
 def uintersect1d(arr1, arr2, assume_unique=False):
     """Find the sorted unique elements of the two input arrays.

diff -r bfddbd5594cbb18c563c5b50c9796e703f202b40 -r b11f408fefc6c6b8d353b9cd59d941370f18521c yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -309,28 +309,28 @@
     if (L[0:2] == 0.0).all():
         # yes it is, now lets check if we are just changing the direction
         # of the z axis or not
-       if L[2] < 0.0:
-          # Just a simple flip of axis to do!
-          if P is not None:
-             P = -P
-          if V is not None:
-             V = -V
+        if L[2] < 0.0:
+            # Just a simple flip of axis to do!
+            if P is not None:
+                P = -P
+            if V is not None:
+                V = -V
 
-          if V is None:
-             return L, P
-          elif P is None:
-             return L, V
-          else:
-              return L, P, V
+            if V is None:
+                return L, P
+            elif P is None:
+                return L, V
+            else:
+                return L, P, V
 
-       else:
-          # Whew! No rotation to do!
-          if V is None:
-             return L, P
-          elif P is None:
-             return L, V
-          else:
-             return L, P, V
+        else:
+            # Whew! No rotation to do!
+            if V is None:
+                return L, P
+            elif P is None:
+                return L, V
+            else:
+                return L, P, V
 
     # Normal vector is not aligned with simulation Z axis
     # Therefore we are going to have to apply a rotation


https://bitbucket.org/yt_analysis/yt/commits/e988822115da/
Changeset:   e988822115da
Branch:      yt
User:        Ben Thompson
Date:        2015-06-02 18:08:25+00:00
Summary:     retaining orientation consistency with the angular momentum fields in the gas, and keeping the unit registry consistent within the ucross function
Affected #:  2 files

diff -r b11f408fefc6c6b8d353b9cd59d941370f18521c -r e988822115da07f784cddf08208c225381bdffcc yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -281,7 +281,9 @@
         center = data.get_field_parameter('center')
         pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
         L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)
-        return ucross(r_vec, v_vec)
+        # adding in the unit registry allows us to have a reference to the dataset
+        # and thus we will always get the correct units after applying the cross product.
+        return -ucross(r_vec, v_vec, registry = data.unit_registry)
 
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
@@ -307,7 +309,7 @@
         pos = pos.T
         vel = vel.T
         y, z, yv, zv = pos[1], pos[2], vel[1], vel[2]
-        return y*zv - z*yv
+        return yv*z - zv*y
 
     registry.add_field((ptype, "particle_specific_angular_momentum_x"),
               function=_particle_specific_angular_momentum_x,
@@ -332,7 +334,7 @@
         pos = pos.T
         vel = vel.T
         x, z, xv, zv = pos[0], pos[2], vel[0], vel[2]
-        return -(x*zv - z*xv)
+        return -(xv*z - zv*x)
 
 
     registry.add_field((ptype, "particle_specific_angular_momentum_y"),
@@ -358,7 +360,7 @@
         pos = pos.T
         vel = vel.T
         x, y, xv, yv = pos[0], pos[1], vel[0], vel[1]
-        return x*yv - y*xv
+        return xv*y - yv*x
 
 
     registry.add_field((ptype, "particle_specific_angular_momentum_z"),
@@ -398,8 +400,8 @@
              validators=[ValidateParameter('center')])
 
     def _particle_angular_momentum(field, data):
-	am = data[ptype, "particle_mass"] * data[ptype, "particle_specific_angular_momentum"].T
-	return am.T
+        am = data[ptype, "particle_mass"] * data[ptype, "particle_specific_angular_momentum"].T
+        return am.T
 
 
     registry.add_field((ptype, "particle_angular_momentum"),

diff -r b11f408fefc6c6b8d353b9cd59d941370f18521c -r e988822115da07f784cddf08208c225381bdffcc yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1224,7 +1224,7 @@
     v = validate_numpy_wrapper_units(v, arrs)
     return v
 
-def ucross(arr1,arr2):
+def ucross(arr1,arr2, registry=None):
     """Applies the cross product to two YT arrays.
 
     This wrapper around numpy.cross preserves units.  
@@ -1234,7 +1234,7 @@
 
     v = np.cross(arr1,arr2)
     units = arr1.units * arr2.units
-    arr = YTArray(v,units)
+    arr = YTArray(v,units, registry=registry)
     return arr
 
 def uintersect1d(arr1, arr2, assume_unique=False):
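
Threading a unit registry through ucross lets the returned YTArray resolve dataset-specific units such as code_length instead of falling back to the default registry. A minimal sketch of the mechanism (the registry check on the last line is illustrative):

    from yt.units.unit_registry import UnitRegistry
    from yt.units.yt_array import YTArray

    reg = UnitRegistry()
    a = YTArray([1.0, 2.0, 3.0], "cm", registry=reg)
    print(a.units.registry is reg)  # True: the units stay tied to this registry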


https://bitbucket.org/yt_analysis/yt/commits/c692a621de3b/
Changeset:   c692a621de3b
Branch:      yt
User:        xarthisius
Date:        2015-06-02 22:06:26+00:00
Summary:     Refactor particle fields
Affected #:  1 file

diff -r e988822115da07f784cddf08208c225381bdffcc -r c692a621de3b2d2e607150f9bfeea969c01fffcf yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -283,7 +283,7 @@
         L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)
         # adding in the unit registry allows us to have a reference to the dataset
         # and thus we will always get the correct units after applying the cross product.
-        return -ucross(r_vec, v_vec, registry = data.unit_registry)
+        return -ucross(r_vec, v_vec, registry=data.ds.unit_registry)
 
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
@@ -292,118 +292,29 @@
               units="cm**2/s",
               validators=[ValidateParameter("center")])
 
-    def _particle_specific_angular_momentum_x(field, data):
-        """
-        Computes the specific angular momentum of a field in the x direction
-
-        If the dataset has the field parameter "normal", then the 
-        specific angular momentum is calculated in the x axis relative
-        to the normal vector.
-
-        Note that the orientation of the x and y axes are arbitrary if a normal
-        vector is defined.
-        """
-        center = data.get_field_parameter('center')
-        pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
-        L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
-        pos = pos.T
-        vel = vel.T
-        y, z, yv, zv = pos[1], pos[2], vel[1], vel[2]
-        return yv*z - zv*y
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_x"),
-              function=_particle_specific_angular_momentum_x,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    def _particle_specific_angular_momentum_y(field, data):
-        """
-        Computes the specific angular momentum of a field in the y direction
-
-        If the dataset has the field parameter "normal", then the 
-        specific angular momentum is calculated in the y axis relative
-        to the normal vector.
-
-        Note that the orientation of the x and y axes are arbitrary if a normal
-        vector is defined.
-        """
-        center = data.get_field_parameter('center')
-        pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
-        L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
-        pos = pos.T
-        vel = vel.T
-        x, z, xv, zv = pos[0], pos[2], vel[0], vel[2]
-        return -(xv*z - zv*x)
-
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_y"),
-              function=_particle_specific_angular_momentum_y,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    def _particle_specific_angular_momentum_z(field, data):
-        """
-        Computes the specific angular momentum of a field in the z direction
-
-        If the dataset has the field parameter "normal", then the 
-        specific angular momentum is calculated in the z axis relative
-        to the normal vector.
-
-        Note that the orientation of the x and y axes are arbitrary if a normal
-        vector is defined.
-        """
-        center = data.get_field_parameter('center')
-        pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
-        L, pos, vel = modify_reference_frame(center, normal, P=pos, V=vel)
-        pos = pos.T
-        vel = vel.T
-        x, y, xv, yv = pos[0], pos[1], vel[0], vel[1]
-        return xv*y - yv*x
-
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_z"),
-              function=_particle_specific_angular_momentum_z,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    create_magnitude_field(registry, "particle_specific_angular_momentum",
-                           "cm**2/s", ftype=ptype, particle_type=True)
-
-    def _particle_angular_momentum_x(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_x"]
-
-    registry.add_field((ptype, "particle_angular_momentum_x"),
-             function=_particle_angular_momentum_x,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
-
-    def _particle_angular_momentum_y(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_y"]
-
-    registry.add_field((ptype, "particle_angular_momentum_y"),
-             function=_particle_angular_momentum_y,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
-
-    def _particle_angular_momentum_z(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_z"]
-
-    registry.add_field((ptype, "particle_angular_momentum_z"),
-             function=_particle_angular_momentum_z,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
+    def _get_spec_ang_mom_comp(axi, ax, _ptype):
+        def _particle_specific_angular_momentum_component(field, data):
+            return data[_ptype, "particle_specific_angular_momentum"][:, axi]
+        def _particle_angular_momentum_component(field, data):
+            return data[_ptype, "particle_mass"] * \
+                data[ptype, "particle_specific_angular_momentum_%s" % ax]
+        return _particle_specific_angular_momentum_component, \
+            _particle_angular_momentum_component
+    for axi, ax in enumerate("xyz"):
+        f, v = _get_spec_ang_mom_comp(axi, ax, ptype)
+        registry.add_field(
+            (ptype, "particle_specific_angular_momentum_%s" % ax),
+            particle_type = True, function=f, units="cm**2/s",
+            validators=[ValidateParameter("center")]
+        )
+        registry.add_field((ptype, "particle_angular_momentum_%s" % ax),
+            function=v, units="g*cm**2/s", particle_type=True,
+            validators=[ValidateParameter('center')])
 
     def _particle_angular_momentum(field, data):
         am = data[ptype, "particle_mass"] * data[ptype, "particle_specific_angular_momentum"].T
         return am.T
 
-
     registry.add_field((ptype, "particle_angular_momentum"),
               function=_particle_angular_momentum,
               particle_type=True,
@@ -449,74 +360,6 @@
         units="cm",
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
-    def _particle_position_relative_x(field, data):
-        """The x component of the  particle positions in a rotated reference
-        frame
-
-        Relative to the coordinate system defined by the *normal* vector and
-        *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
-        L, pos, = modify_reference_frame(center, normal, P=pos)
-        pos = pos.T
-        return pos[0]
-
-    registry.add_field(
-        (ptype, "particle_position_relative_x"),
-        function=_particle_position_relative_x,
-        particle_type=True,
-        units="cm",
-        validators=[ValidateParameter("normal"), ValidateParameter("center")])
-
-    def _particle_position_relative_y(field, data):
-        """The y component of the  particle positions in a rotated reference
-        frame
-
-        Relative to the coordinate system defined by the *normal* vector and
-        *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
-        L, pos = modify_reference_frame(center, normal, P=pos)
-        pos = pos.T
-        return pos[1]
-
-    registry.add_field((ptype, "particle_position_relative_y"),
-              function=_particle_position_relative_y,
-              particle_type=True, units="cm",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
-
-
-    def _particle_position_relative_z(field, data):
-        """The z component of the  particle positions in a rotated reference
-        frame
-
-        Relative to the coordinate system defined by the *normal* vector and
-        *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
-        L, pos = modify_reference_frame(center, normal, P=pos)
-        pos = pos.T
-        return pos[2]
-
-    registry.add_field((ptype, "particle_position_relative_z"),
-              function=_particle_position_relative_z,
-              particle_type=True, units="cm",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
-
     def _particle_velocity_relative(field, data):
         """The vector particle velocities in an arbitrary coordinate system
 
@@ -538,74 +381,22 @@
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
-    def _particle_velocity_relative_x(field, data):
-        """The x component of the particle velocities in an arbitrary coordinate
-        system
 
-        Relative to the coordinate system defined by the *normal* vector,
-        *bulk_velocity* vector and *center* field parameters.
+    def _get_coord_funcs_relative(axi, _ptype):
+        def _particle_pos_rel(field, data):
+            return data[_ptype, "particle_position_relative"][:, axi]
+        def _particle_vel_rel(field, data):
+            return data[_ptype, "particle_velocity_relative"][:, axi]
+        return _particle_vel_rel, _particle_pos_rel
+    for axi, ax in enumerate("xyz"):
+        v, p = _get_coord_funcs_relative(axi, ptype)
+        registry.add_field((ptype, "particle_velocity_relative_%s" % ax),
+            particle_type = True, function = v,
+            units = "code_velocity")
+        registry.add_field((ptype, "particle_position_relative_%s" % ax),
+            particle_type = True, function = p,
+            units = "code_length")
 
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
-        L, vel = modify_reference_frame(center, normal, V=vel)
-        vel = vel.T
-        return vel[0]
-
-    registry.add_field((ptype, "particle_velocity_relative_x"),
-              function=_particle_velocity_relative_x,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
-
-    def _particle_velocity_relative_y(field, data):
-        """The y component of the particle velocities in an arbitrary coordinate
-        system
-
-        Relative to the coordinate system defined by the *normal* vector,
-        *bulk_velocity* vector and *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter('bulk_velocity')
-        vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
-        L, vel = modify_reference_frame(center, normal, V=vel)
-        vel = vel.T
-        return vel[1]
-
-    registry.add_field((ptype, "particle_velocity_relative_y"),
-              function=_particle_velocity_relative_y,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
-
-    def _particle_velocity_relative_z(field, data):
-        """The z component of the particle velocities in an arbitrary coordinate
-        system
-
-        Relative to the coordinate system defined by the *normal* vector,
-        *bulk_velocity* vector and *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
-        L, vel = modify_reference_frame(center, normal, V=vel)
-        vel = vel.T
-        return vel[2]
-
-    registry.add_field((ptype, "particle_velocity_relative_z"),
-              function=_particle_velocity_relative_z,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
 
     # this is just particle radius but we add it with an alias for the sake of
     # consistent naming
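
The refactor collapses six near-identical field definitions into a loop, and the _get_spec_ang_mom_comp and _get_coord_funcs_relative factories are what make that safe: closures capture variables rather than values, so functions defined directly in the loop body would all see the final axi. The standard demonstration:

    # Late binding: every lambda sees the loop variable's final value.
    funcs = [lambda: axi for axi in range(3)]
    print([f() for f in funcs])  # [2, 2, 2]

    # Factory function: axi is bound per call, one value per function.
    def make(axi):
        return lambda: axi

    funcs = [make(axi) for axi in range(3)]
    print([f() for f in funcs])  # [0, 1, 2]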


https://bitbucket.org/yt_analysis/yt/commits/115c9f124932/
Changeset:   115c9f124932
Branch:      yt
User:        Ben Thompson
Date:        2015-06-02 23:28:03+00:00
Summary:     simplifying modify_reference_frame
Affected #:  1 file

diff -r c692a621de3b2d2e607150f9bfeea969c01fffcf -r 115c9f124932d6961e3109d5b2f1b308c1558eb8 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -307,30 +307,22 @@
 
     # is the L vector pointing in the Z direction?
     if (L[0:2] == 0.0).all():
-        # yes it is, now lets check if we are just changing the direction
-        # of the z axis or not
+        # the reason why we need to explicitly check for the above
+        # is that arccos returns NaN if L[0:2] == 0.0
+        # this just checks if we need to flip the z axis or not
         if L[2] < 0.0:
-            # Just a simple flip of axis to do!
+            # this is just a simple flip in direction of the z axis
             if P is not None:
                 P = -P
             if V is not None:
                 V = -V
 
-            if V is None:
-                return L, P
-            elif P is None:
-                return L, V
-            else:
-                return L, P, V
-
+        if V is None:
+            return L, P
+        elif P is None:
+            return L, V
         else:
-            # Whew! No rotation to do!
-            if V is None:
-                return L, P
-            elif P is None:
-                return L, V
-            else:
-                return L, P, V
+            return L, P, V
 
     # Normal vector is not aligned with simulation Z axis
     # Therefore we are going to have to apply a rotation


https://bitbucket.org/yt_analysis/yt/commits/108aba2425c5/
Changeset:   108aba2425c5
Branch:      yt
User:        Ben Thompson
Date:        2015-06-02 23:30:47+00:00
Summary:     improving return logic slightly in modify_reference_frame
Affected #:  1 file

diff -r 115c9f124932d6961e3109d5b2f1b308c1558eb8 -r 108aba2425c51f2bf8cd33353d05437ce30ee6fb yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -317,9 +317,9 @@
             if V is not None:
                 V = -V
 
-        if V is None:
+        if V is None and P is not None:
             return L, P
-        elif P is None:
+        elif P is None and V is not None:
             return L, V
         else:
             return L, P, V
@@ -347,9 +347,11 @@
     if V is not None:
         V = rotate_vector_3D(V, 1, theta)
     L = rotate_vector_3D(L, 1, theta)
-    if V is None:
+
+    # return the values
+    if V is None and P is not None:
         return L, P
-    if P is None:
+    elif P is None and V is not None:
         return L, V
     else:
         return L, P, V


https://bitbucket.org/yt_analysis/yt/commits/198e8d3e7384/
Changeset:   198e8d3e7384
Branch:      yt
User:        xarthisius
Date:        2015-06-03 02:55:40+00:00
Summary:     Simplify return statement in modify_reference_frame
Affected #:  1 file

diff -r 108aba2425c51f2bf8cd33353d05437ce30ee6fb -r 198e8d3e73841166254fac88168addd29083730f yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -306,9 +306,9 @@
         P = P - CoM
 
     # is the L vector pointing in the Z direction?
-    if (L[0:2] == 0.0).all():
+    if np.inner(L[:2], L[:2]) == 0.0:
         # the reason why we need to explicitly check for the above
-        # is that arccos returns NaN if L[0:2] == 0.0
+        # is that this quantity appears in a denominator below
         # this just checks if we need to flip the z axis or not
         if L[2] < 0.0:
             # this is just a simple flip in direction of the z axis
@@ -317,20 +317,15 @@
             if V is not None:
                 V = -V
 
-        if V is None and P is not None:
-            return L, P
-        elif P is None and V is not None:
-            return L, V
-        else:
-            return L, P, V
+        return filter(None, (L, P, V))
 
     # Normal vector is not aligned with simulation Z axis
     # Therefore we are going to have to apply a rotation
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
-    LL[2] = 0.
-    theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
-    if L[1] < 0:
+    LL[2] = 0.0
+    theta = np.arccos(np.inner(LL, [1.0, 0.0, 0.0]) / np.inner(LL, LL) ** 0.5)
+    if L[1] < 0.0:
         theta = -theta
     # Now rotate all the position, velocity, and L vectors by this much around
     # the z axis.
@@ -340,7 +335,7 @@
         V = rotate_vector_3D(V, 2, theta)
     L = rotate_vector_3D(L, 2, theta)
     # Now find the angle between L and the z-axis.
-    theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
+    theta = np.arccos(np.inner(L, [0.0, 0.0, 1.0]) / np.inner(L, L) ** 0.5)
     # This time we rotate around the y axis.
     if P is not None:
         P = rotate_vector_3D(P, 1, theta)
@@ -348,13 +343,7 @@
         V = rotate_vector_3D(V, 1, theta)
     L = rotate_vector_3D(L, 1, theta)
 
-    # return the values
-    if V is None and P is not None:
-        return L, P
-    elif P is None and V is not None:
-        return L, V
-    else:
-        return L, P, V
+    return filter(None, (L, P, V))
 
 def compute_rotational_velocity(CoM, L, P, V):
     r"""Computes the rotational velocity for some data around an axis.


https://bitbucket.org/yt_analysis/yt/commits/9c35837b518f/
Changeset:   9c35837b518f
Branch:      yt
User:        Ben Thompson
Date:        2015-06-03 15:54:40+00:00
Summary:     Added in the minor amendments for modify_ref_frame and now I think we are ready to push
Affected #:  1 file

diff -r 198e8d3e73841166254fac88168addd29083730f -r 9c35837b518fc1160c4d3cc6cef0215d0ca0f4c4 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -317,7 +317,13 @@
             if V is not None:
                 V = -V
 
-        return filter(None, (L, P, V))
+        # return the values
+        if V is None and P is not None:
+            return L, P
+        elif P is None and V is not None:
+            return L, V
+        else:
+            return L, P, V
 
     # Normal vector is not aligned with simulation Z axis
     # Therefore we are going to have to apply a rotation
@@ -343,7 +349,13 @@
         V = rotate_vector_3D(V, 1, theta)
     L = rotate_vector_3D(L, 1, theta)
 
-    return filter(None, (L, P, V))
+    # return the values
+    if V is None and P is not None:
+        return L, P
+    elif P is None and V is not None:
+        return L, V
+    else:
+        return L, P, V
 
 def compute_rotational_velocity(CoM, L, P, V):
     r"""Computes the rotational velocity for some data around an axis.


https://bitbucket.org/yt_analysis/yt/commits/8d3663ac217d/
Changeset:   8d3663ac217d
Branch:      yt
User:        xarthisius
Date:        2015-06-03 16:39:43+00:00
Summary:     Merged in cosmosquark/yt (pull request #1585)

[bugfix][improvement] Specific Angular Momentum [xyz] computed relative to a normal vector and some bug fixes
Affected #:  3 files

diff -r f000bf5609433df69390a45396dc3fe60c87b0b5 -r 8d3663ac217ddab3405fc79be7e7b299aa09bbd0 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -23,7 +23,8 @@
     ValidateSpatial
 
 from yt.units.yt_array import \
-    uconcatenate
+    uconcatenate, \
+    ucross
 
 from yt.utilities.math_utils import \
     get_sph_r_component, \
@@ -118,10 +119,8 @@
              units = "g")
 
     def particle_density(field, data):
-        pos = data[ptype, coord_name]
-        mass = data[ptype, mass_name]
-        pos.convert_to_units("code_length")
-        mass.convert_to_units("code_mass")
+        pos = data[ptype, coord_name].convert_to_units("code_length")
+        mass = data[ptype, mass_name].convert_to_units("code_mass")
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
         d = data.ds.arr(d, "code_mass")
         d /= data["index", "cell_volume"]
@@ -244,17 +243,26 @@
                        units = "cm / s",
                        particle_type=True)
 
+def get_angular_momentum_components(ptype, data, spos, svel):
+    if data.has_field_parameter("normal"):
+        normal = data.get_field_parameter("normal")
+    else:
+        normal = data.ds.arr([0.0,0.0,1.0],"code_length") # default to simulation axis
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
+    vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+    return pos, vel, normal, bv
+
 def standard_particle_fields(registry, ptype,
                              spos = "particle_position_%s",
                              svel = "particle_velocity_%s"):
     # This function will set things up based on the scalar fields and standard
     # yt field names.
+    # data.get_field_parameter("bulk_velocity") defaults to YTArray([0,0,0] cm/s)
 
     def _particle_velocity_magnitude(field, data):
         """ M{|v|} """
         bulk_velocity = data.get_field_parameter("bulk_velocity")
-        if bulk_velocity is None:
-            bulk_velocity = np.zeros(3)
         return np.sqrt((data[ptype, svel % 'x'] - bulk_velocity[0])**2
                      + (data[ptype, svel % 'y'] - bulk_velocity[1])**2
                      + (data[ptype, svel % 'z'] - bulk_velocity[2])**2 )
@@ -270,19 +278,13 @@
         Calculate the specific angular momentum of a particle.  Returns a vector for each
         particle.
         """
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        xv = data[ptype, svel % 'x'] - bv[0]
-        yv = data[ptype, svel % 'y'] - bv[1]
-        zv = data[ptype, svel % 'z'] - bv[2]
         center = data.get_field_parameter('center')
-        coords = self.ds.arr([data[ptype, spos % d] for d in 'xyz'],
-                             dtype=np.float64, units='cm')
-        new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-        r_vec = coords - np.reshape(center,new_shape)
-        v_vec = self.ds.arr([xv,yv,zv], dtype=np.float64, units='cm/s')
-        return np.cross(r_vec, v_vec, axis=0).T
+        pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
+        L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)
+        # adding in the unit registry allows us to have a reference to the dataset
+        # and thus we will always get the correct units after applying the cross product.
+        return -ucross(r_vec, v_vec, registry=data.ds.unit_registry)
+
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
               function=_particle_specific_angular_momentum,
@@ -290,87 +292,29 @@
               units="cm**2/s",
               validators=[ValidateParameter("center")])
 
-    def _particle_specific_angular_momentum_x(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        center = data.get_field_parameter('center')
-        y = data[ptype, spos % "y"] - center[1]
-        z = data[ptype, spos % "z"] - center[2]
-        yv = data[ptype, svel % "y"] - bv[1]
-        zv = data[ptype, svel % "z"] - bv[2]
-        return yv*z - zv*y
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_x"),
-              function=_particle_specific_angular_momentum_x,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    def _particle_specific_angular_momentum_y(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        center = data.get_field_parameter('center')
-        x = data[ptype, spos % "x"] - center[0]
-        z = data[ptype, spos % "z"] - center[2]
-        xv = data[ptype, svel % "x"] - bv[0]
-        zv = data[ptype, svel % "z"] - bv[2]
-        return -(xv*z - zv*x)
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_y"),
-              function=_particle_specific_angular_momentum_y,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    def _particle_specific_angular_momentum_z(field, data):
-        if data.has_field_parameter("bulk_velocity"):
-            bv = data.get_field_parameter("bulk_velocity")
-        else: bv = np.zeros(3, dtype=np.float64)
-        center = data.get_field_parameter('center')
-        x = data[ptype, spos % "x"] - center[0]
-        y = data[ptype, spos % "y"] - center[1]
-        xv = data[ptype, svel % "x"] - bv[0]
-        yv = data[ptype, svel % "y"] - bv[1]
-        return xv*y - yv*x
-
-    registry.add_field((ptype, "particle_specific_angular_momentum_z"),
-              function=_particle_specific_angular_momentum_z,
-              particle_type=True,
-              units="cm**2/s",
-              validators=[ValidateParameter("center")])
-
-    create_magnitude_field(registry, "particle_specific_angular_momentum",
-                           "cm**2/s", ftype=ptype, particle_type=True)
-
-    def _particle_angular_momentum_x(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_x"]
-    registry.add_field((ptype, "particle_angular_momentum_x"),
-             function=_particle_angular_momentum_x,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
-
-    def _particle_angular_momentum_y(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_y"]
-    registry.add_field((ptype, "particle_angular_momentum_y"),
-             function=_particle_angular_momentum_y,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
-
-    def _particle_angular_momentum_z(field, data):
-        return data[ptype, "particle_mass"] * \
-               data[ptype, "particle_specific_angular_momentum_z"]
-    registry.add_field((ptype, "particle_angular_momentum_z"),
-             function=_particle_angular_momentum_z,
-             units="g*cm**2/s", particle_type=True,
-             validators=[ValidateParameter('center')])
+    def _get_spec_ang_mom_comp(axi, ax, _ptype):
+        def _particle_specific_angular_momentum_component(field, data):
+            return data[_ptype, "particle_specific_angular_momentum"][:, axi]
+        def _particle_angular_momentum_component(field, data):
+            return data[_ptype, "particle_mass"] * \
+                data[ptype, "particle_specific_angular_momentum_%s" % ax]
+        return _particle_specific_angular_momentum_component, \
+            _particle_angular_momentum_component
+    for axi, ax in enumerate("xyz"):
+        f, v = _get_spec_ang_mom_comp(axi, ax, ptype)
+        registry.add_field(
+            (ptype, "particle_specific_angular_momentum_%s" % ax),
+            particle_type = True, function=f, units="cm**2/s",
+            validators=[ValidateParameter("center")]
+        )
+        registry.add_field((ptype, "particle_angular_momentum_%s" % ax),
+            function=v, units="g*cm**2/s", particle_type=True,
+            validators=[ValidateParameter('center')])
 
     def _particle_angular_momentum(field, data):
-        return (data[ptype, "particle_mass"] *
-                data[ptype, "particle_specific_angular_momentum"].T).T
+        am = data[ptype, "particle_mass"] * data[ptype, "particle_specific_angular_momentum"].T
+        return am.T
+
     registry.add_field((ptype, "particle_angular_momentum"),
               function=_particle_angular_momentum,
               particle_type=True,
@@ -405,9 +349,7 @@
         """
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        pos = pos.T
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
         L, pos = modify_reference_frame(center, normal, P=pos)
         return pos
 
@@ -418,81 +360,6 @@
         units="cm",
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
-    def _particle_position_relative_x(field, data):
-        """The x component of the  particle positions in a rotated reference
-        frame
-
-        Relative to the coordinate system defined by the *normal* vector and
-        *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        pos = pos.T
-        L, pos, = modify_reference_frame(center, normal, P=pos)
-        pos = pos.T
-        return pos[0]
-
-    registry.add_field(
-        (ptype, "particle_position_relative_x"),
-        function=_particle_position_relative_x,
-        particle_type=True,
-        units="cm",
-        validators=[ValidateParameter("normal"), ValidateParameter("center")])
-
-    def _particle_position_relative_y(field, data):
-        """The y component of the  particle positions in a rotated reference
-        frame
-
-        Relative to the coordinate system defined by the *normal* vector and
-        *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        pos = pos.T
-        L, pos = modify_reference_frame(center, normal, P=pos)
-        pos = pos.T
-        return pos[1]
-
-    registry.add_field((ptype, "particle_position_relative_y"),
-              function=_particle_position_relative_y,
-              particle_type=True, units="cm",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
-
-
-    def _particle_position_relative_z(field, data):
-        """The z component of the  particle positions in a rotated reference
-        frame
-
-        Relative to the coordinate system defined by the *normal* vector and
-        *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        pos = pos.T
-        L, pos = modify_reference_frame(center, normal, P=pos)
-        pos = pos.T
-        return pos[2]
-
-    registry.add_field((ptype, "particle_position_relative_z"),
-              function=_particle_position_relative_z,
-              particle_type=True, units="cm",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
-
     def _particle_velocity_relative(field, data):
         """The vector particle velocities in an arbitrary coordinate system
 
@@ -504,10 +371,7 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        vel = vel - np.reshape(bv, (3, 1))
-        vel = vel.T
+        vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
         L, vel = modify_reference_frame(center, normal, V=vel)
         return vel
 
@@ -517,83 +381,22 @@
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
-    def _particle_velocity_relative_x(field, data):
-        """The x component of the particle velocities in an arbitrary coordinate
-        system
 
-        Relative to the coordinate system defined by the *normal* vector,
-        *bulk_velocity* vector and *center* field parameters.
+    def _get_coord_funcs_relative(axi, _ptype):
+        def _particle_pos_rel(field, data):
+            return data[_ptype, "particle_position_relative"][:, axi]
+        def _particle_vel_rel(field, data):
+            return data[_ptype, "particle_velocity_relative"][:, axi]
+        return _particle_vel_rel, _particle_pos_rel
+    for axi, ax in enumerate("xyz"):
+        v, p = _get_coord_funcs_relative(axi, ptype)
+        registry.add_field((ptype, "particle_velocity_relative_%s" % ax),
+            particle_type = True, function = v,
+            units = "code_velocity")
+        registry.add_field((ptype, "particle_position_relative_%s" % ax),
+            particle_type = True, function = p,
+            units = "code_length")
 
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        vel = vel - np.reshape(bv, (3, 1))
-        vel = vel.T
-        L, vel = modify_reference_frame(center, normal, V=vel)
-        vel = vel.T
-        return vel[0]
-
-    registry.add_field((ptype, "particle_velocity_relative_x"),
-              function=_particle_velocity_relative_x,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
-
-    def _particle_velocity_relative_y(field, data):
-        """The y component of the particle velocities in an arbitrary coordinate
-        system
-
-        Relative to the coordinate system defined by the *normal* vector,
-        *bulk_velocity* vector and *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter('bulk_velocity')
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        vel = vel - np.reshape(bv, (3, 1))
-        vel = vel.T
-        L, vel = modify_reference_frame(center, normal, V=vel)
-        vel = vel.T
-        return vel[1]
-
-    registry.add_field((ptype, "particle_velocity_relative_y"),
-              function=_particle_velocity_relative_y,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
-
-    def _particle_velocity_relative_z(field, data):
-        """The z component of the particle velocities in an arbitrary coordinate
-        system
-
-        Relative to the coordinate system defined by the *normal* vector,
-        *bulk_velocity* vector and *center* field parameters.
-
-        Note that the orientation of the x and y axes are arbitrary.
-        """
-        normal = data.get_field_parameter('normal')
-        center = data.get_field_parameter('center')
-        bv = data.get_field_parameter("bulk_velocity")
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
-        bv = vel - np.reshape(bv, (3, 1))
-        vel = vel.T
-        L, vel = modify_reference_frame(center, normal, V=vel)
-        vel = vel.T
-        return vel[2]
-
-    registry.add_field((ptype, "particle_velocity_relative_z"),
-              function=_particle_velocity_relative_z,
-              particle_type=True, units="cm/s",
-              validators=[ValidateParameter("normal"),
-                          ValidateParameter("center")])
 
     # this is just particle radius but we add it with an alias for the sake of
     # consistent naming
@@ -621,8 +424,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter("center")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_sph_theta(pos, normal), "")
 
@@ -651,8 +453,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter("center")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_sph_phi(pos, normal), "")
 
@@ -683,10 +484,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -728,10 +527,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -765,8 +562,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
-        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
@@ -798,7 +595,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_cyl_r(pos, normal),
                            'code_length')
@@ -818,7 +615,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_cyl_theta(pos, normal), "")
 
@@ -837,7 +634,7 @@
         """
         normal = data.get_field_parameter("normal")
         center = data.get_field_parameter('center')
-        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         return data.ds.arr(get_cyl_z(pos, normal),
                            'code_length')
@@ -858,10 +655,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_cyl_theta(pos, center)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
@@ -884,10 +679,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_cyl_theta(pos, center)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
@@ -920,10 +713,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = spos
-        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
-        vel = svel
-        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+        pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+        vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         cylz = get_cyl_z_component(vel, normal)
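
The recurring change in the hunks above replaces bare YTArray construction
with data.ds.arr, which binds the new array to the dataset's unit registry
so that code units resolve correctly for that dataset; schematically:

    # data.ds.arr(...) is YTArray(..., registry=data.ds.unit_registry),
    # so units such as "code_length" are interpreted for this dataset.
    pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
    pos = pos - np.reshape(center, (3, 1))  # broadcast center over all particles
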
@@ -1044,3 +835,4 @@
                        units = "g/cm**3")
     return [field_name]
 
+

diff -r f000bf5609433df69390a45396dc3fe60c87b0b5 -r 8d3663ac217ddab3405fc79be7e7b299aa09bbd0 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1224,6 +1224,19 @@
     v = validate_numpy_wrapper_units(v, arrs)
     return v
 
+def ucross(arr1, arr2, registry=None):
+    """Applies the cross product to two YT arrays.
+
+    This wrapper around numpy.cross preserves units.
+    See the documentation of numpy.cross for full
+    details.
+    """
+
+    v = np.cross(arr1, arr2)
+    units = arr1.units * arr2.units
+    arr = YTArray(v, units, registry=registry)
+    return arr
+
 def uintersect1d(arr1, arr2, assume_unique=False):
     """Find the sorted unique elements of the two input arrays.
 

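Assuming this changeset is applied, a quick sanity check of the new wrapper
(the input values are illustrative):

    from yt.units.yt_array import YTArray, ucross

    r = YTArray([[1.0, 0.0, 0.0]], "cm")
    v = YTArray([[0.0, 1.0, 0.0]], "cm/s")
    L = ucross(r, v)
    # YTArray([[0., 0., 1.]]) with units of cm**2/s
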
diff -r f000bf5609433df69390a45396dc3fe60c87b0b5 -r 8d3663ac217ddab3405fc79be7e7b299aa09bbd0 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -301,22 +301,37 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
-    if (L == np.array([0, 0, 1.])).all():
-        # Whew! Nothing to do!
-        if V is None:
+    # First translate the positions to center of mass reference frame.
+    if P is not None:
+        P = P - CoM
+
+    # Is the L vector already pointing along the z axis?
+    if np.inner(L[:2], L[:2]) == 0.0:
+        # We check this explicitly because np.inner(LL, LL) appears in a
+        # denominator below; if L has no x or y component, all that
+        # remains is to decide whether the z axis must be flipped.
+        if L[2] < 0.0:
+            # L points along -z, so a simple flip of the z axis suffices.
+            if P is not None:
+                P = -P
+            if V is not None:
+                V = -V
+
+        # return the values
+        if V is None and P is not None:
             return L, P
-        if P is None:
+        elif P is None and V is not None:
             return L, V
         else:
             return L, P, V
-    # First translate the positions to center of mass reference frame.
-    if P is not None:
-        P = P - CoM
+
+    # The normal vector is not aligned with the simulation z axis,
+    # so we have to apply a rotation.
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
-    LL[2] = 0.
-    theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
-    if L[1] < 0:
+    LL[2] = 0.0
+    theta = np.arccos(np.inner(LL, [1.0, 0.0, 0.0]) / np.inner(LL, LL) ** 0.5)
+    if L[1] < 0.0:
         theta = -theta
     # Now rotate all the position, velocity, and L vectors by this much around
     # the z axis.
@@ -326,16 +341,18 @@
         V = rotate_vector_3D(V, 2, theta)
     L = rotate_vector_3D(L, 2, theta)
     # Now find the angle between L and the z-axis.
-    theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
+    theta = np.arccos(np.inner(L, [0.0, 0.0, 1.0]) / np.inner(L, L) ** 0.5)
     # This time we rotate around the y axis.
     if P is not None:
         P = rotate_vector_3D(P, 1, theta)
     if V is not None:
         V = rotate_vector_3D(V, 1, theta)
     L = rotate_vector_3D(L, 1, theta)
-    if V is None:
+
+    # return the values
+    if V is None and P is not None:
         return L, P
-    if P is None:
+    elif P is None and V is not None:
         return L, V
     else:
         return L, P, V
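
A short sketch of the two code paths above (inputs are illustrative):

    import numpy as np
    from yt.utilities.math_utils import modify_reference_frame

    CoM = np.zeros(3)
    P = np.array([[1.0, 2.0, 3.0]])

    # L anti-parallel to z: positions are recentered and flipped, no rotation.
    L1, P1 = modify_reference_frame(CoM, np.array([0.0, 0.0, -1.0]), P=P)

    # L off-axis: two rotations (about z, then about y) align L with the z axis.
    L2, P2 = modify_reference_frame(CoM, np.array([1.0, 1.0, 0.0]), P=P)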

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

