[yt-svn] commit/yt-3.0: 25 new changesets

commits-noreply at bitbucket.org
Wed Jun 19 15:50:16 PDT 2013


25 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/ef93e65cb9db/
Changeset:   ef93e65cb9db
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-23 22:09:39
Summary:     added standard particle deposit fields to ART
Affected #:  1 file

diff -r 440a76cf232df87a3e27304dfd8507f4f965d3f9 -r ef93e65cb9dbb35dcbb7deed9c1e5ad2566fc2f8 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -218,6 +218,7 @@
               particle_type=True,
               convert_function=lambda x: x.convert("particle_mass"))
 
+
 def _particle_age(field, data):
     tr = data["particle_creation_time"]
     return data.pf.current_time - tr
@@ -260,3 +261,108 @@
     return data["particle_mass"]/mass_sun_cgs
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
+
+# Particle Deposition Fields
+ptypes = ["all", "darkmatter", "stars"]
+names  = ["Particle", "Dark Matter", "Stellar"]
+
+# Particle Mass Density Fields
+for ptype, name in zip(ptypes, names):
+    def particle_density(field, data):
+        vol = data["CellVolume"]
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        pmass = dd[(ptype, "particle_mass")]
+        mass = data.deposit(pos, [pmass], method = "sum")
+        return mass / vol
+    add_field("%s_mass_density_deposit" % ptype, function=particle_density, 
+              particle_type=False, take_log=True, units=r'g/cm^{3}',
+              display_name="%s Density" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Mass Fields
+for ptype, name in zip(ptypes, names):
+    def particle_count(field, data):
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        mass = data.deposit(pos, method = "sum")
+        return mass
+    add_field("%s_mass_deposit" % ptype, function=particle_density, 
+              particle_type=False, take_log=True, units=r'1/cm^{3}',
+              display_name="%s Mass Density" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Number Density Fields
+for ptype, name in zip(ptypes, names):
+    def particle_count(field, data):
+        vol = data["CellVolume"]
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        count = data.deposit(pos, method = "count")
+        return count / vol
+    add_field("%s_number_density_deposit" % ptype, function=particle_density, 
+              particle_type=False, take_log=True, units=r'1/cm^{3}',
+              display_name="%s Number Density" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Number Fields
+for ptype, name in zip(ptypes, names):
+    def particle_count(field, data):
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        count = data.deposit(pos, method = "count")
+        return count 
+    add_field("%s_number_deposit" % ptype, function=particle_density, 
+              particle_type=False, take_log=True, units=r'1/cm^{3}',
+              display_name="%s Number" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Velocity Fields
+for ptype, name in zip(ptypes, names):
+    for axis in 'xyz':
+        def particle_velocity(field, data):
+            pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                                   for ax in 'xyz'])
+            vel = data[(ptype, "particle_velocity_%s" % axis)]
+            vel_deposit = data.deposit(vel, method = "sum")
+            return vel_deposit
+        add_field("%s_velocity_%s_deposit" % (ptype, axis), 
+                  function=particle_velocity, 
+                  particle_type=False, take_log=False, units=r'cm/s',
+                  display_name="%s Velocity %s" % (name, axis.upper()), 
+                  validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Mass-weighted Velocity Fields
+for ptype, name in zip(ptypes, names):
+    for axis in 'xyz':
+        def particle_velocity_weighted(field, data):
+            pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                                   for ax in 'xyz'])
+            vel  = data[(ptype, "particle_velocity_%s" % axis)]
+            mass = data[(ptype, "particle_mass")]
+            vel_deposit = data.deposit(vel * mass, method = "sum")
+            norm = data.deposit(mass, method = "sum")
+            return vel_deposit / norm
+        add_field("%s_weighted_velocity_%s_deposit" % (ptype, axis), 
+                  function=particle_velocity, 
+                  particle_type=False, take_log=False, units=r'cm/s',
+                  display_name="%s Velocity %s" % (name, axis.upper()), 
+                  validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Mass-weighted Velocity Magnitude Fields
+for ptype, name in zip(ptypes, names):
+    def particle_velocity_weighted(field, data):
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        vels = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        vel = np.sqrt(np.sum(vels, axis=0))
+        mass = data[(ptype, "particle_mass")]
+        vel_deposit = data.deposit(vel * mass, method = "sum")
+        norm = data.deposit(mass, method = "sum")
+        return vel_deposit / norm
+    add_field("%s_weighted_velocity_deposit" % (ptype, axis), 
+              function=particle_velocity, 
+              particle_type=False, take_log=False, units=r'cm/s',
+              display_name="%s Velocity" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')

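A note on the block this changeset adds: Python closures capture loop variables by reference, so every function defined inside these `for ptype, name in zip(...)` loops will see the final values of `ptype` and `axis` ("stars", "z") by the time yt calls it. The closures also read from an undefined `dd` (corrected to `data` in the next changeset), and several `add_field` calls pass `particle_density` or `particle_velocity` instead of the function defined immediately above them, with units and display names copied from the density field. A minimal sketch of the usual factory-function fix, assuming `data.deposit` and the field interface behave as in the diff:

    import numpy as np

    def make_particle_density(ptype):
        # "ptype" is a local of the factory, so each registered field
        # keeps its own particle type instead of sharing one closure cell.
        def particle_density(field, data):
            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                   for ax in "xyz"])
            pmass = data[(ptype, "particle_mass")]
            mass = data.deposit(pos, [pmass], method="sum")
            return mass / data["CellVolume"]
        return particle_density

    density_funcs = dict((p, make_particle_density(p))
                         for p in ["all", "darkmatter", "stars"])

Changeset 4fb8998b8dca below adopts exactly this shape with `_particle_functions`.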

https://bitbucket.org/yt_analysis/yt-3.0/commits/39b6f75e60ed/
Changeset:   39b6f75e60ed
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-24 03:46:36
Summary:     new ART particle deposit fields work
Affected #:  1 file

diff -r ef93e65cb9dbb35dcbb7deed9c1e5ad2566fc2f8 -r 39b6f75e60edbf3e7f2630da2571e19c94148f37 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -270,9 +270,9 @@
 for ptype, name in zip(ptypes, names):
     def particle_density(field, data):
         vol = data["CellVolume"]
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
-        pmass = dd[(ptype, "particle_mass")]
+        pmass = data[(ptype, "particle_mass")]
         mass = data.deposit(pos, [pmass], method = "sum")
         return mass / vol
     add_field("%s_mass_density_deposit" % ptype, function=particle_density, 
@@ -283,7 +283,7 @@
 # Particle Mass Fields
 for ptype, name in zip(ptypes, names):
     def particle_count(field, data):
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
         mass = data.deposit(pos, method = "sum")
         return mass
@@ -296,7 +296,7 @@
 for ptype, name in zip(ptypes, names):
     def particle_count(field, data):
         vol = data["CellVolume"]
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
         count = data.deposit(pos, method = "count")
         return count / vol
@@ -308,7 +308,7 @@
 # Particle Number Fields
 for ptype, name in zip(ptypes, names):
     def particle_count(field, data):
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
         count = data.deposit(pos, method = "count")
         return count 
@@ -321,7 +321,7 @@
 for ptype, name in zip(ptypes, names):
     for axis in 'xyz':
         def particle_velocity(field, data):
-            pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                    for ax in 'xyz'])
             vel = data[(ptype, "particle_velocity_%s" % axis)]
             vel_deposit = data.deposit(vel, method = "sum")
@@ -336,7 +336,7 @@
 for ptype, name in zip(ptypes, names):
     for axis in 'xyz':
         def particle_velocity_weighted(field, data):
-            pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                    for ax in 'xyz'])
             vel  = data[(ptype, "particle_velocity_%s" % axis)]
             mass = data[(ptype, "particle_mass")]
@@ -352,16 +352,16 @@
 # Particle Mass-weighted Velocity Magnitude Fields
 for ptype, name in zip(ptypes, names):
     def particle_velocity_weighted(field, data):
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
-        vels = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        vels = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
         vel = np.sqrt(np.sum(vels, axis=0))
         mass = data[(ptype, "particle_mass")]
         vel_deposit = data.deposit(vel * mass, method = "sum")
         norm = data.deposit(mass, method = "sum")
         return vel_deposit / norm
-    add_field("%s_weighted_velocity_deposit" % (ptype, axis), 
+    add_field("%s_weighted_velocity_deposit" % (ptype), 
               function=particle_velocity, 
               particle_type=False, take_log=False, units=r'cm/s',
               display_name="%s Velocity" % name, 

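Besides the `dd` to `data` renames, the last hunk repairs a crash at import time: `axis` is not defined in this loop, and even where it leaks in from the loops above, `"%s_weighted_velocity_deposit" % (ptype, axis)` hands a two-element tuple to a single `%s`. An illustrative interpreter session:

    >>> "%s_weighted_velocity_deposit" % ("stars", "x")
    Traceback (most recent call last):
      ...
    TypeError: not all arguments converted during string formatting
    >>> "%s_weighted_velocity_deposit" % ("stars",)
    'stars_weighted_velocity_deposit'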

https://bitbucket.org/yt_analysis/yt-3.0/commits/92a32f6c8c9f/
Changeset:   92a32f6c8c9f
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-24 08:33:31
Summary:     4x speedup in particle access via caching
Affected #:  1 file

diff -r 39b6f75e60edbf3e7f2630da2571e19c94148f37 -r 92a32f6c8c9f4ebaac7728192ba617c7c9c2f519 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -42,6 +42,8 @@
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
     tb, ages = None, None
+    cache = {}
+    masks = {}
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -68,85 +70,102 @@
                 cp += subset.cell_count
         return tr
 
+    def _get_mask(self, selector, ftype):
+        if ftype in self.masks.keys():
+            return self.masks[ftype]
+        pf = self.pf
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
+        rp = lambda ax: read_particles(
+            self.file_particle, self.Nrow, idxa=idxa,
+            idxb=idxb, fields=ax)
+        x, y, z = rp(['x','y','z'])
+        dd = pf.domain_dimensions[0]
+        off = 1.0/dd
+        x, y, z = (t/dd - off for t in (x, y, z))
+        mask = selector.select_points(x, y, z)
+        self.masks[ftype] = mask
+        # save the particle positions if asked
+        for ax in 'xyz':
+            f = (ftype, "particle_position_%s" % ax)
+            self.cache[f] = vars()[ax]
+        return self.masks[ftype]
+
+    def _get_field(self,  mask, field):
+        if field in self.cache.keys():
+            return self.cache[field][mask].astype('f8')
+        tr = {}
+        ftype, fname = field
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, self.ls, ptmax)
+        npa = idxb - idxa
+        sizes = np.diff(np.concatenate(([0], self.ls)))
+        rp = lambda ax: read_particles(
+            self.file_particle, self.Nrow, idxa=idxa,
+            idxb=idxb, fields=ax)
+        for i, ax in enumerate('xyz'):
+            if fname.startswith("particle_position_%s" % ax):
+                tr[field] = rp([ax])
+            if fname.startswith("particle_velocity_%s" % ax):
+                tr[field] = rp(['v'+ax])
+        if fname == "particle_mass":
+            a = 0
+            data = np.zeros(npa, dtype='f8')
+            for ptb, size, m in zip(pbool, sizes, self.ws):
+                if ptb:
+                    data[a:a+size] = m
+                    a += size
+            tr[field] = data
+        elif fname == "particle_index":
+            tr[field] = np.arange(idxa, idxb).astype('int64')
+        elif fname == "particle_type":
+            a = 0
+            data = np.zeros(npa, dtype='int')
+            for i, (ptb, size) in enumerate(zip(pbool, sizes)):
+                if ptb:
+                    data[a: a + size] = i
+                    a += size
+            tr[field] = data
+        if pbool[-1] and fname in particle_star_fields:
+            data = read_star_field(self.file_stars, field=fname)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            nstars = self.ls[-1]-self.ls[-2]
+            if nstars > 0:
+                temp[-nstars:] = data
+            tr[field] = temp
+        if fname == "particle_creation_time":
+            self.tb, self.ages, data = interpolate_ages(
+                tr[field][-nstars:],
+                self.file_stars,
+                self.tb,
+                self.ages,
+                self.pf.current_time)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            temp[-nstars:] = data
+            tr[field] = temp
+            del data
+        if tr == {}:
+            tr = dict((f, np.array([])) for f in fields)
+        self.cache[field] = tr[field].astype('f8')
+        return self.cache[field][mask]
+
     def _read_particle_selection(self, chunks, selector, fields):
         tr = {}
         fields_read = []
-        for chunk in chunks:
-            level = chunk.objs[0].domain.domain_level
-            pf = chunk.objs[0].domain.pf
-            masks = {}
-            ws, ls = pf.parameters["wspecies"], pf.parameters["lspecies"]
-            sizes = np.diff(np.concatenate(([0], ls)))
-            ptmax = ws[-1]
-            npt = ls[-1]
-            nstars = ls[-1]-ls[-2]
-            file_particle = pf._file_particle_data
-            file_stars = pf._file_particle_stars
-            ftype_old = None
-            for field in fields:
-                if field in fields_read:
-                    continue
-                ftype, fname = field
-                pbool, idxa, idxb = _determine_field_size(pf, ftype, ls, ptmax)
-                npa = idxb-idxa
-                if not ftype_old == ftype:
-                    Nrow = pf.parameters["Nrow"]
-                    rp = lambda ax: read_particles(
-                        file_particle, Nrow, idxa=idxa,
-                        idxb=idxb, field=ax)
-                    x, y, z = (rp(ax) for ax in 'xyz')
-                    dd = pf.domain_dimensions[0]
-                    off = 1.0/dd
-                    x, y, z = (t.astype('f8')/dd - off for t in (x, y, z))
-                    mask = selector.select_points(x, y, z)
-                    size = mask.sum()
-                for i, ax in enumerate('xyz'):
-                    if fname.startswith("particle_position_%s" % ax):
-                        tr[field] = vars()[ax]
-                    if fname.startswith("particle_velocity_%s" % ax):
-                        tr[field] = rp('v'+ax)
-                if fname == "particle_mass":
-                    a = 0
-                    data = np.zeros(npa, dtype='f8')
-                    for ptb, size, m in zip(pbool, sizes, ws):
-                        if ptb:
-                            data[a:a+size] = m
-                            a += size
-                    tr[field] = data
-                elif fname == "particle_index":
-                    tr[field] = np.arange(idxa, idxb).astype('int64')
-                elif fname == "particle_type":
-                    a = 0
-                    data = np.zeros(npa, dtype='int')
-                    for i, (ptb, size) in enumerate(zip(pbool, sizes)):
-                        if ptb:
-                            data[a:a+size] = i
-                            a += size
-                    tr[field] = data
-                if pbool[-1] and fname in particle_star_fields:
-                    data = read_star_field(file_stars, field=fname)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    if nstars > 0:
-                        temp[-nstars:] = data
-                    tr[field] = temp
-                if fname == "particle_creation_time":
-                    self.tb, self.ages, data = interpolate_ages(
-                        tr[field][-nstars:],
-                        file_stars,
-                        self.tb,
-                        self.ages,
-                        pf.current_time)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    temp[-nstars:] = data
-                    tr[field] = temp
-                    del data
-                tr[field] = tr[field][mask].astype('f8')
-                ftype_old = ftype
-                fields_read.append(field)
-        if tr == {}:
-            tr = dict((f, np.array([])) for f in fields)
-        return tr
-
+        chunk = [c for c in chunks][0]
+        self.pf = chunk.objs[0].domain.pf
+        self.ws = self.pf.parameters["wspecies"]
+        self.ls = self.pf.parameters["lspecies"]
+        self.file_particle = self.pf._file_particle_data
+        self.file_stars = self.pf._file_particle_stars
+        self.Nrow = self.pf.parameters["Nrow"]
+        data = {}
+        #import pdb; pdb.set_trace()
+        for f in fields:
+            ftype, fname = f
+            mask = self._get_mask(selector, ftype)
+            data[f] =  self._get_field(mask, f)
+        return data
 
 def _determine_field_size(pf, field, lspecies, ptmax):
     pbool = np.zeros(len(lspecies), dtype="bool")
@@ -361,27 +380,29 @@
     return ranges
 
 
-def read_particles(file, Nrow, idxa, idxb, field):
+def read_particles(file, Nrow, idxa, idxb, fields):
     words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4  # for file_particle_data; not always true?
     np_per_page = Nrow**2  # defined in ART a_setup.h, # of particles/page
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
-    data = np.array([], 'f4')
     fh = open(file, 'r')
     skip, count = idxa, idxb - idxa
     kwargs = dict(words=words, real_size=real_size, 
                   np_per_page=np_per_page, num_pages=num_pages)
-    ranges = get_ranges(skip, count, field, **kwargs)
-    data = None
-    for seek, this_count in ranges:
-        fh.seek(seek)
-        temp = np.fromfile(fh, count=this_count, dtype='>f4')
-        if data is None:
-            data = temp
-        else:
-            data = np.concatenate((data, temp))
+    arrs = []
+    for field in fields:
+        ranges = get_ranges(skip, count, field, **kwargs)
+        data = None
+        for seek, this_count in ranges:
+            fh.seek(seek)
+            temp = np.fromfile(fh, count=this_count, dtype='>f4')
+            if data is None:
+                data = temp
+            else:
+                data = np.concatenate((data, temp))
+        arrs.append(data.astype('f8'))
     fh.close()
-    return data
+    return arrs
 
 
 def read_star_field(file, field=None):

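The speedup comes from two changes: `read_particles` now accepts a list of fields and returns a list of arrays, so one call can fetch x, y, and z together, and `_get_field`/`_get_mask` memoize their results in class-level dicts. Two properties of that pattern are worth noting: `cache = {}` on the class body is shared by every `IOHandlerART` instance in the process, and `field in self.cache` is a constant-time test while `field in self.cache.keys()` builds an intermediate list under Python 2. A stand-alone sketch of the memoization shape (`read` is a hypothetical stand-in for the disk I/O):

    class CachingReader(object):
        cache = {}  # class attribute: one dict shared by every instance

        def get_field(self, field, read):
            # field is a (ftype, fname) tuple; read() stands in for the
            # page-wise file reads done in _get_field.
            if field in self.cache:
                return self.cache[field]
            self.cache[field] = read(field)
            return self.cache[field]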

https://bitbucket.org/yt_analysis/yt-3.0/commits/14da5639265a/
Changeset:   14da5639265a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-27 22:45:34
Summary:     fixed mask caching with particles & chunks
Affected #:  2 files

diff -r 92a32f6c8c9f4ebaac7728192ba617c7c9c2f519 -r 14da5639265a0cf4e73a639e817aa28abd73146d yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -274,7 +274,8 @@
                                for ax in 'xyz'])
         pmass = data[(ptype, "particle_mass")]
         mass = data.deposit(pos, [pmass], method = "sum")
-        return mass / vol
+        dens = mass / vol
+        return dens
     add_field("%s_mass_density_deposit" % ptype, function=particle_density, 
               particle_type=False, take_log=True, units=r'g/cm^{3}',
               display_name="%s Density" % name, 

diff -r 92a32f6c8c9f4ebaac7728192ba617c7c9c2f519 -r 14da5639265a0cf4e73a639e817aa28abd73146d yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -43,7 +43,6 @@
     _data_style = "art"
     tb, ages = None, None
     cache = {}
-    masks = {}
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -71,8 +70,6 @@
         return tr
 
     def _get_mask(self, selector, ftype):
-        if ftype in self.masks.keys():
-            return self.masks[ftype]
         pf = self.pf
         ptmax = self.ws[-1]
         pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
@@ -84,16 +81,15 @@
         off = 1.0/dd
         x, y, z = (t/dd - off for t in (x, y, z))
         mask = selector.select_points(x, y, z)
-        self.masks[ftype] = mask
         # save the particle positions if asked
         for ax in 'xyz':
             f = (ftype, "particle_position_%s" % ax)
             self.cache[f] = vars()[ax]
-        return self.masks[ftype]
+        return mask
 
-    def _get_field(self,  mask, field):
+    def _get_field(self,  field):
         if field in self.cache.keys():
-            return self.cache[field][mask].astype('f8')
+            return self.cache[field]
         tr = {}
         ftype, fname = field
         ptmax = self.ws[-1]
@@ -117,7 +113,7 @@
                     a += size
             tr[field] = data
         elif fname == "particle_index":
-            tr[field] = np.arange(idxa, idxb).astype('int64')
+            tr[field] = np.arange(idxa, idxb)
         elif fname == "particle_type":
             a = 0
             data = np.zeros(npa, dtype='int')
@@ -146,25 +142,24 @@
             del data
         if tr == {}:
             tr = dict((f, np.array([])) for f in fields)
-        self.cache[field] = tr[field].astype('f8')
-        return self.cache[field][mask]
+        self.cache[field] = tr[field]
+        return self.cache[field]
 
     def _read_particle_selection(self, chunks, selector, fields):
-        tr = {}
-        fields_read = []
-        chunk = [c for c in chunks][0]
-        self.pf = chunk.objs[0].domain.pf
-        self.ws = self.pf.parameters["wspecies"]
-        self.ls = self.pf.parameters["lspecies"]
-        self.file_particle = self.pf._file_particle_data
-        self.file_stars = self.pf._file_particle_stars
-        self.Nrow = self.pf.parameters["Nrow"]
-        data = {}
-        #import pdb; pdb.set_trace()
+        for chunk in chunks:
+            self.pf = chunk.objs[0].domain.pf
+            self.ws = self.pf.parameters["wspecies"]
+            self.ls = self.pf.parameters["lspecies"]
+            self.file_particle = self.pf._file_particle_data
+            self.file_stars = self.pf._file_particle_stars
+            self.Nrow = self.pf.parameters["Nrow"]
+            break
+        data = {f:np.array([]) for f in fields}
         for f in fields:
             ftype, fname = f
             mask = self._get_mask(selector, ftype)
-            data[f] =  self._get_field(mask, f)
+            arr = self._get_field(f)[mask].astype('f8')
+            data[f] = np.concatenate((arr, data[f]))
         return data
 
 def _determine_field_size(pf, field, lspecies, ptmax):

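The mask cache added in the previous changeset was keyed on the particle type alone, but a mask records which particles one particular selector picks, so a second data object with different geometry would silently reuse a stale mask; this changeset drops that cache and accumulates results per field with `np.concatenate`. A generalized sketch of the accumulation pattern (`read_chunk_field` is a hypothetical stand-in for the mask plus `_get_field` machinery):

    import numpy as np

    def read_particle_fields(chunks, fields, read_chunk_field):
        # Accumulate each requested field across chunks.
        data = dict((f, np.empty(0, dtype="f8")) for f in fields)
        for chunk in chunks:
            for f in fields:
                data[f] = np.concatenate((data[f],
                                          read_chunk_field(chunk, f)))
        return data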

https://bitbucket.org/yt_analysis/yt-3.0/commits/cf466a241e92/
Changeset:   cf466a241e92
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-27 22:48:25
Summary:     added particle types; fixes dd[('stars', 'particle_mass')] translating into ('all','particle_mass')
Affected #:  1 file

diff -r 14da5639265a0cf4e73a639e817aa28abd73146d -r cf466a241e92016d150cb4b78d1e5df2699d6357 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -136,6 +136,10 @@
                 self.parameter_file.particle_types.append("specie%i" % specie)
         else:
             self.parameter_file.particle_types = []
+        for ptype in self.parameter_file.particle_types:
+            for pfield in self.particle_field_list:
+                pfn = (ptype, pfield)
+                self.field_list.append(pfn)
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()

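With `(ptype, pfield)` tuples appended to `field_list`, particle fields become addressable per species instead of silently resolving to `('all', ...)`. Illustrative usage against the yt API of the time; the snapshot path is hypothetical:

    from yt.mods import load

    pf = load("10MpcBox_csf512_a0.300.d")   # hypothetical ART snapshot
    dd = pf.h.all_data()
    stars_mass = dd[("stars", "particle_mass")]       # stars only
    dm_x = dd[("darkmatter", "particle_position_x")]  # no "all" aliasing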

https://bitbucket.org/yt_analysis/yt-3.0/commits/effae4caa73a/
Changeset:   effae4caa73a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-27 23:39:17
Summary:     improved particle caching, down to 45s deposit
Affected #:  1 file

diff -r cf466a241e92016d150cb4b78d1e5df2699d6357 -r effae4caa73acd2ccc4177f275ecb13520327ac6 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -73,27 +73,21 @@
         pf = self.pf
         ptmax = self.ws[-1]
         pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
-        rp = lambda ax: read_particles(
-            self.file_particle, self.Nrow, idxa=idxa,
-            idxb=idxb, fields=ax)
-        x, y, z = rp(['x','y','z'])
-        dd = pf.domain_dimensions[0]
-        off = 1.0/dd
-        x, y, z = (t/dd - off for t in (x, y, z))
+        pstr = 'particle_position_%s'
+        x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
         mask = selector.select_points(x, y, z)
-        # save the particle positions if asked
-        for ax in 'xyz':
-            f = (ftype, "particle_position_%s" % ax)
-            self.cache[f] = vars()[ax]
         return mask
 
     def _get_field(self,  field):
         if field in self.cache.keys():
+            mylog.debug("Cached %s", str(field))
             return self.cache[field]
+        mylog.debug("Reading %s", str(field))
         tr = {}
         ftype, fname = field
         ptmax = self.ws[-1]
-        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, self.ls, ptmax)
+        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, 
+                                                  self.ls, ptmax)
         npa = idxb - idxa
         sizes = np.diff(np.concatenate(([0], self.ls)))
         rp = lambda ax: read_particles(
@@ -101,7 +95,9 @@
             idxb=idxb, fields=ax)
         for i, ax in enumerate('xyz'):
             if fname.startswith("particle_position_%s" % ax):
-                tr[field] = rp([ax])
+                dd = self.pf.domain_dimensions[0]
+                off = 1.0/dd
+                tr[field] = rp([ax])[0]/dd - off
             if fname.startswith("particle_velocity_%s" % ax):
                 tr[field] = rp(['v'+ax])
         if fname == "particle_mass":

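The coordinate conversion that moved into `_get_field` encodes the ART convention that particle positions are stored in 1-based root-grid cell units: dividing by `domain_dimensions[0]` and subtracting one cell width (`off`) maps them onto the unit domain. A worked example, assuming a 128^3 root grid:

    import numpy as np

    dd = 128                              # domain_dimensions[0], assumed
    off = 1.0 / dd                        # one root-grid cell width
    raw = np.array([1.0, 64.5, 129.0])    # particle positions in grid units
    print(raw / dd - off)                 # [ 0.          0.49609375  1.        ]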

https://bitbucket.org/yt_analysis/yt-3.0/commits/a619bc2e3f8a/
Changeset:   a619bc2e3f8a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-27 23:48:02
Summary:     masks now cached; next optimization will remove particles when deposited
Affected #:  1 file

diff -r effae4caa73acd2ccc4177f275ecb13520327ac6 -r a619bc2e3f8a6b80079be6dc86f7624b2add44aa yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -43,6 +43,7 @@
     _data_style = "art"
     tb, ages = None, None
     cache = {}
+    masks = {}
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -70,13 +71,17 @@
         return tr
 
     def _get_mask(self, selector, ftype):
+        key = (selector, ftype)
+        if key in self.masks.keys():
+            return self.masks[key]
         pf = self.pf
         ptmax = self.ws[-1]
         pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
         pstr = 'particle_position_%s'
         x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
         mask = selector.select_points(x, y, z)
-        return mask
+        self.masks[key] = mask
+        return self.masks[key]
 
     def _get_field(self,  field):
         if field in self.cache.keys():

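Keying the mask cache on `(selector, ftype)` restores the memoization removed in 14da5639265a without its staleness bug: a new data object brings a new selector object and therefore a new key. A stand-alone sketch (`select_points` is a hypothetical stand-in for the position read plus `selector.select_points`):

    masks = {}

    def get_mask(selector, ftype, select_points):
        # Keying on the selector object itself keeps masks from going
        # stale when a different data object queries the same ftype.
        key = (selector, ftype)
        if key not in masks:
            masks[key] = select_points(selector, ftype)
        return masks[key]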

https://bitbucket.org/yt_analysis/yt-3.0/commits/d1ae641d84e2/
Changeset:   d1ae641d84e2
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-28 06:33:23
Summary:     removed chunking for loop
Affected #:  1 file

diff -r a619bc2e3f8a6b80079be6dc86f7624b2add44aa -r d1ae641d84e2dbd75badebd26b0a06b137ebfdb9 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -147,14 +147,13 @@
         return self.cache[field]
 
     def _read_particle_selection(self, chunks, selector, fields):
-        for chunk in chunks:
-            self.pf = chunk.objs[0].domain.pf
-            self.ws = self.pf.parameters["wspecies"]
-            self.ls = self.pf.parameters["lspecies"]
-            self.file_particle = self.pf._file_particle_data
-            self.file_stars = self.pf._file_particle_stars
-            self.Nrow = self.pf.parameters["Nrow"]
-            break
+        chunk = chunks.next()
+        self.pf = chunk.objs[0].domain.pf
+        self.ws = self.pf.parameters["wspecies"]
+        self.ls = self.pf.parameters["lspecies"]
+        self.file_particle = self.pf._file_particle_data
+        self.file_stars = self.pf._file_particle_stars
+        self.Nrow = self.pf.parameters["Nrow"]
         data = {f:np.array([]) for f in fields}
         for f in fields:
             ftype, fname = f

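`chunks` arrives as a generator here, so the `for ... break` idiom collapses to taking its first element. For reference, `chunks.next()` is the Python 2 only spelling; the equivalent builtin works on both lines:

    chunk = chunks.next()   # Python 2 spelling, as committed
    chunk = next(chunks)    # version-agnostic builtin, Python 2.6+ and 3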

https://bitbucket.org/yt_analysis/yt-3.0/commits/fbdf1ecaecef/
Changeset:   fbdf1ecaecef
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-04 23:13:23
Summary:     added weighted mean deposit
Affected #:  1 file

diff -r d1ae641d84e2dbd75badebd26b0a06b137ebfdb9 -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -232,3 +232,39 @@
 
 deposit_std = StdParticleField
 
+
+cdef class WeightedMeanParticleField(ParticleDepositOperation):
+    # Deposit both mass * field and mass into two scalars
+    # then in finalize divide mass * field / mass
+    cdef np.float64_t *wf
+    cdef public object owf
+    cdef np.float64_t *w
+    cdef public object ow
+    def initialize(self):
+        self.owf = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray wfarr = self.owf
+        self.wf = <np.float64_t*> wfarr.data
+        
+        self.ow = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray wfarr = self.ow
+        self.w = <np.float64_t*> warr.data
+    
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, 
+                      np.float64_t ppos[3],
+                      np.float64_t *fields 
+                      ):
+        cdef int ii[3], i
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
+        self.w[ gind(ii[0], ii[1], ii[2], dim) + offset] += fields[1]
+        self.wf[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0] * fields[1]
+        
+    def finalize(self):
+        return self.owf / self.ow
+
+deposit_weighted_mean= WeightedMeanParticleField
+

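As committed, `initialize` declares `cdef np.ndarray wfarr` twice and the second block then reads an undeclared `warr`, which Cython will not compile; changeset 4fb8998b8dca below renames the second declaration to `warr`. The operation itself accumulates sum(weight) and sum(value * weight) per cell during `process` and divides in `finalize`. A NumPy sketch of that reduction, assuming flat destination-cell indices have already been computed from `(ppos - left_edge) / dds`:

    import numpy as np

    def weighted_mean_deposit(cell_idx, values, weights, ncells):
        # cell_idx: flat destination cell per particle.
        w  = np.bincount(cell_idx, weights=weights, minlength=ncells)
        wf = np.bincount(cell_idx, weights=values * weights,
                         minlength=ncells)
        with np.errstate(divide="ignore", invalid="ignore"):
            return wf / w   # NaN where a cell received no particles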

https://bitbucket.org/yt_analysis/yt-3.0/commits/4fb8998b8dca/
Changeset:   4fb8998b8dca
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-05 00:05:38
Summary:     fixed up the fields
Affected #:  4 files

diff -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc -r 4fb8998b8dca4254bd25c761241a477addfba81b yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -25,6 +25,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import numpy as np
+
+from yt.funcs import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -262,108 +264,112 @@
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
 
+# Modeled after the TIPSY / Gadget frontend particle deposit fields
+def _particle_functions(ptype, pname):
+    mass_name = "particle_mass"
+    def particle_pos(data, axes="xyz"):
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]\
+                                    for ax in axes])
+        if len(axes)==1:
+            return pos[0]
+        return pos
+
+    def particle_vel(data, axes="xyz"):
+        pos = np.column_stack([data[(ptype, "particle_velocity_%s" % ax)]\
+                                    for ax in axes])
+        if len(axes)==1:
+            return pos[0]
+        return pos
+
+    def particle_count(field, data):
+        pos = particle_pos(data)
+        d = data.deposit(pos, method = "count")
+        return d
+    
+    add_field("deposit_%s_count" % ptype,
+             function = particle_count,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Count}" % pname,
+             projection_conversion = '1')
+
+    def particle_mass(field, data):
+        pos = particle_pos(data)
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        return d
+
+    add_field("deposit_%s_mass" % ptype,
+             function = particle_mass,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Mass}" % pname,
+             units = r"\mathrm{g}",
+             projected_units = r"\mathrm{g}\/\mathrm{cm}",
+             projection_conversion = 'cm')
+
+    def particle_density(field, data):
+        print data.shape
+        pos = particle_pos(data)
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d /= data["CellVolume"]
+        return d
+
+    add_field("deposit_%s_density" % ptype,
+             function = particle_density,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Density}" % pname,
+             units = r"\mathrm{g}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{g}/\mathrm{cm}^{-2}",
+             projection_conversion = 'cm')
+
+    def particle_number_density(field, data):
+        pos = particle_pos(data)
+        d = data.deposit(pos, method = "count")
+        d /= data["CellVolume"]
+        return d
+
+    add_field("deposit_%s_number_density" % ptype,
+             function = particle_density,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Number Density}" % pname,
+             units = r"\mathrm{1}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{1}/\mathrm{cm}^{-2}",
+             projection_conversion = 'cm')
+
+    def particle_mass_velocity(field, data):
+        pos = particle_pos(data)
+        vel = particle_vel(data, ax) 
+        mass = data[ptype, mass_name]
+        d  = data.deposit(pos, [mass, vel], method = "weighted_mean")
+        d /= data.deposit(pos, [mass], method = "sum")
+        return d
+
+    add_field("deposit_%s_weighted_velocity" % ptype,
+             function = particle_mass,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Mass Weighted Velocity}" % pname,
+             units = r"\mathrm{g}",
+             projected_units = r"\mathrm{g}\/\mathrm{cm}",
+             projection_conversion = 'cm')
+
+    add_field((ptype, "ParticleMass"),
+            function = TranslationFunc((ptype, mass_name)),
+            particle_type = True,
+            units = r"\mathrm{g}")
+
+    def _ParticleMassMsun(field, data):
+        return data[ptype, mass_name].copy()
+    def _conv_Msun(data):
+        return 1.0/mass_sun_cgs
+
+    add_field((ptype, "ParticleMassMsun"),
+            function = _ParticleMassMsun,
+            convert_function = _conv_Msun,
+            particle_type = True,
+            units = r"\mathrm{M}_\odot")
+
 # Particle Deposition Fields
-ptypes = ["all", "darkmatter", "stars"]
-names  = ["Particle", "Dark Matter", "Stellar"]
+_ptypes = ["all", "darkmatter", "stars"]
+_pnames  = ["Particle", "Dark Matter", "Stellar"]
 
-# Particle Mass Density Fields
-for ptype, name in zip(ptypes, names):
-    def particle_density(field, data):
-        vol = data["CellVolume"]
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        pmass = data[(ptype, "particle_mass")]
-        mass = data.deposit(pos, [pmass], method = "sum")
-        dens = mass / vol
-        return dens
-    add_field("%s_mass_density_deposit" % ptype, function=particle_density, 
-              particle_type=False, take_log=True, units=r'g/cm^{3}',
-              display_name="%s Density" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')
+for _ptype, _pname in zip(_ptypes, _pnames):
+    _particle_functions(_ptype, _pname)
 
-# Particle Mass Fields
-for ptype, name in zip(ptypes, names):
-    def particle_count(field, data):
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        mass = data.deposit(pos, method = "sum")
-        return mass
-    add_field("%s_mass_deposit" % ptype, function=particle_density, 
-              particle_type=False, take_log=True, units=r'1/cm^{3}',
-              display_name="%s Mass Density" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Number Density Fields
-for ptype, name in zip(ptypes, names):
-    def particle_count(field, data):
-        vol = data["CellVolume"]
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        count = data.deposit(pos, method = "count")
-        return count / vol
-    add_field("%s_number_density_deposit" % ptype, function=particle_density, 
-              particle_type=False, take_log=True, units=r'1/cm^{3}',
-              display_name="%s Number Density" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Number Fields
-for ptype, name in zip(ptypes, names):
-    def particle_count(field, data):
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        count = data.deposit(pos, method = "count")
-        return count 
-    add_field("%s_number_deposit" % ptype, function=particle_density, 
-              particle_type=False, take_log=True, units=r'1/cm^{3}',
-              display_name="%s Number" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Velocity Fields
-for ptype, name in zip(ptypes, names):
-    for axis in 'xyz':
-        def particle_velocity(field, data):
-            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                                   for ax in 'xyz'])
-            vel = data[(ptype, "particle_velocity_%s" % axis)]
-            vel_deposit = data.deposit(vel, method = "sum")
-            return vel_deposit
-        add_field("%s_velocity_%s_deposit" % (ptype, axis), 
-                  function=particle_velocity, 
-                  particle_type=False, take_log=False, units=r'cm/s',
-                  display_name="%s Velocity %s" % (name, axis.upper()), 
-                  validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Mass-weighted Velocity Fields
-for ptype, name in zip(ptypes, names):
-    for axis in 'xyz':
-        def particle_velocity_weighted(field, data):
-            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                                   for ax in 'xyz'])
-            vel  = data[(ptype, "particle_velocity_%s" % axis)]
-            mass = data[(ptype, "particle_mass")]
-            vel_deposit = data.deposit(vel * mass, method = "sum")
-            norm = data.deposit(mass, method = "sum")
-            return vel_deposit / norm
-        add_field("%s_weighted_velocity_%s_deposit" % (ptype, axis), 
-                  function=particle_velocity, 
-                  particle_type=False, take_log=False, units=r'cm/s',
-                  display_name="%s Velocity %s" % (name, axis.upper()), 
-                  validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Mass-weighted Velocity Magnitude Fields
-for ptype, name in zip(ptypes, names):
-    def particle_velocity_weighted(field, data):
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        vels = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        vel = np.sqrt(np.sum(vels, axis=0))
-        mass = data[(ptype, "particle_mass")]
-        vel_deposit = data.deposit(vel * mass, method = "sum")
-        norm = data.deposit(mass, method = "sum")
-        return vel_deposit / norm
-    add_field("%s_weighted_velocity_deposit" % (ptype), 
-              function=particle_velocity, 
-              particle_type=False, take_log=False, units=r'cm/s',
-              display_name="%s Velocity" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')

diff -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc -r 4fb8998b8dca4254bd25c761241a477addfba81b yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -44,6 +44,7 @@
     tb, ages = None, None
     cache = {}
     masks = {}
+    caching = False
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -72,7 +73,7 @@
 
     def _get_mask(self, selector, ftype):
         key = (selector, ftype)
-        if key in self.masks.keys():
+        if key in self.masks.keys() and self.caching:
             return self.masks[key]
         pf = self.pf
         ptmax = self.ws[-1]
@@ -80,11 +81,14 @@
         pstr = 'particle_position_%s'
         x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
         mask = selector.select_points(x, y, z)
-        self.masks[key] = mask
-        return self.masks[key]
+        if self.caching:
+            self.masks[key] = mask
+            return self.masks[key]
+        else:
+            return mask
 
     def _get_field(self,  field):
-        if field in self.cache.keys():
+        if field in self.cache.keys() and self.caching:
             mylog.debug("Cached %s", str(field))
             return self.cache[field]
         mylog.debug("Reading %s", str(field))
@@ -143,8 +147,11 @@
             del data
         if tr == {}:
             tr = dict((f, np.array([])) for f in fields)
-        self.cache[field] = tr[field]
-        return self.cache[field]
+        if self.caching:
+            self.cache[field] = tr[field]
+            return self.cache[field]
+        else:
+            return tr[field]
 
     def _read_particle_selection(self, chunks, selector, fields):
         chunk = chunks.next()

diff -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc -r 4fb8998b8dca4254bd25c761241a477addfba81b yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -246,7 +246,7 @@
         self.wf = <np.float64_t*> wfarr.data
         
         self.ow = np.zeros(self.nvals, dtype='float64')
-        cdef np.ndarray wfarr = self.ow
+        cdef np.ndarray warr = self.ow
         self.w = <np.float64_t*> warr.data
     
     @cython.cdivision(True)

diff -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc -r 4fb8998b8dca4254bd25c761241a477addfba81b yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -142,8 +142,10 @@
 elif exe_name in \
         ["mpi4py", "embed_enzo",
          "python"+sys.version[:3]+"-mpi"] \
-    or '_parallel' in dir(sys) \
-    or any(["ipengine" in arg for arg in sys.argv]):
-    parallel_capable = turn_on_parallelism()
+        or '_parallel' in dir(sys) \
+        or any(["ipengine" in arg for arg in sys.argv]) \
+        or any(["cluster-id" in arg for arg in sys.argv]):
+    #parallel_capable = turn_on_parallelism()
+    pass
 else:
     parallel_capable = False

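The refactor folds the six field loops into one `_particle_functions` factory, which resolves the closure sharing noted under ef93e65cb9db, but a few slips remain in this commit: `particle_pos`/`particle_vel` return `pos[0]` (the first particle's row) rather than the column for a single axis, `deposit_%s_number_density` registers `particle_density`, `deposit_%s_weighted_velocity` registers `particle_mass` and references an undefined `ax`, and the `yt/startup_tasks.py` hunk disabling `turn_on_parallelism()` appears accidental (it is reverted in f09af31e835a below). A corrected sketch of the single-axis accessor:

    import numpy as np

    def particle_vel(data, ptype, axes="xyz"):
        # column_stack yields shape (N, len(axes)); a single axis should
        # come back as the column vel[:, 0], not the first row vel[0].
        vel = np.column_stack([data[(ptype, "particle_velocity_%s" % ax)]
                               for ax in axes])
        if len(axes) == 1:
            return vel[:, 0]
        return vel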

https://bitbucket.org/yt_analysis/yt-3.0/commits/d55ca706eeb6/
Changeset:   d55ca706eeb6
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-05 00:39:10
Summary:     fixing velocity fields return
Affected #:  1 file

diff -r 4fb8998b8dca4254bd25c761241a477addfba81b -r d55ca706eeb69be62b094887d41ec708bd6cb53f yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -108,7 +108,7 @@
                 off = 1.0/dd
                 tr[field] = rp([ax])[0]/dd - off
             if fname.startswith("particle_velocity_%s" % ax):
-                tr[field] = rp(['v'+ax])
+                tr[field], = rp(['v'+ax])
         if fname == "particle_mass":
             a = 0
             data = np.zeros(npa, dtype='f8')

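`read_particles` has returned a list with one array per requested field since 92a32f6c8c9f, so the old right-hand side bound the one-element list itself; the trailing comma unpacks it. In miniature:

    import numpy as np

    vals = [np.arange(4.0)]   # shape of what rp(['vx']) returns
    arr, = vals               # trailing comma unpacks the single element
    assert isinstance(arr, np.ndarray)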

https://bitbucket.org/yt_analysis/yt-3.0/commits/04285b5f9f0b/
Changeset:   04285b5f9f0b
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-05 00:39:32
Summary:     implementing weighted mean deposit
Affected #:  1 file

diff -r d55ca706eeb69be62b094887d41ec708bd6cb53f -r 04285b5f9f0b4922a51586f013661f10095be8b5 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -306,7 +306,6 @@
              projection_conversion = 'cm')
 
     def particle_density(field, data):
-        print data.shape
         pos = particle_pos(data)
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
         d /= data["CellVolume"]
@@ -334,21 +333,25 @@
              projected_units = r"\mathrm{1}/\mathrm{cm}^{-2}",
              projection_conversion = 'cm')
 
-    def particle_mass_velocity(field, data):
-        pos = particle_pos(data)
-        vel = particle_vel(data, ax) 
-        mass = data[ptype, mass_name]
-        d  = data.deposit(pos, [mass, vel], method = "weighted_mean")
-        d /= data.deposit(pos, [mass], method = "sum")
-        return d
 
-    add_field("deposit_%s_weighted_velocity" % ptype,
-             function = particle_mass,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass Weighted Velocity}" % pname,
-             units = r"\mathrm{g}",
-             projected_units = r"\mathrm{g}\/\mathrm{cm}",
-             projection_conversion = 'cm')
+    for ax in "xyz":
+        def particle_mass_velocity(field, data, ax):
+            pos = particle_pos(data)
+            vel = particle_vel(data, ax) 
+            mass = data[ptype, mass_name]
+            d = data.deposit(pos, [vel, mass], method = "weighted_mean")
+            d[~np.isfinite(d)] = 0.0
+            return d
+
+        add_field("deposit_%s_weighted_velocity_%s" % (ptype, ax),
+                 function = lambda f, d: particle_mass_velocity(f, d, ax),
+                 validators = [ValidateSpatial()],
+                 display_name = "\\mathrm{%s Mass Weighted Velocity %s}" % \
+                                (pname, ax.upper()),
+                 units = r"\mathrm{\mathrm{cm}/\mathrm{s}}",
+                 projected_units = r"\mathrm{\mathrm{cm}/\mathrm{s}}",
+                 projection_conversion = '1',
+                 take_log=False)
 
     add_field((ptype, "ParticleMass"),
             function = TranslationFunc((ptype, mass_name)),

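One caveat on the loop this hunk introduces: `lambda f, d: particle_mass_velocity(f, d, ax)` still captures `ax` by reference, so after the loop all three `deposit_*_weighted_velocity_*` fields evaluate the z component. A minimal demonstration of the capture and the default-argument fix:

    funcs_buggy = [lambda: ax for ax in "xyz"]
    funcs_fixed = [lambda ax=ax: ax for ax in "xyz"]
    print([f() for f in funcs_buggy])   # ['z', 'z', 'z']
    print([f() for f in funcs_fixed])   # ['x', 'y', 'z']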

https://bitbucket.org/yt_analysis/yt-3.0/commits/f09af31e835a/
Changeset:   f09af31e835a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-05 00:49:05
Summary:     removed accidental changes to yt startup_tasks
Affected #:  1 file

diff -r 04285b5f9f0b4922a51586f013661f10095be8b5 -r f09af31e835ae26014d845c12df7f658f8bfdb2c yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -142,10 +142,8 @@
 elif exe_name in \
         ["mpi4py", "embed_enzo",
          "python"+sys.version[:3]+"-mpi"] \
-        or '_parallel' in dir(sys) \
-        or any(["ipengine" in arg for arg in sys.argv]) \
-        or any(["cluster-id" in arg for arg in sys.argv]):
-    #parallel_capable = turn_on_parallelism()
-    pass
+    or '_parallel' in dir(sys) \
+    or any(["ipengine" in arg for arg in sys.argv]):
+    parallel_capable = turn_on_parallelism()
 else:
     parallel_capable = False


https://bitbucket.org/yt_analysis/yt-3.0/commits/b9bede66ce55/
Changeset:   b9bede66ce55
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:00:38
Summary:     Merged particle_deposit.pyx; included weighted average
Affected #:  117 files

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,11 +4,15 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
 yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
+yt/geometry/fake_octree.c
 yt/geometry/oct_container.c
+yt/geometry/particle_deposit.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
@@ -35,6 +39,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5156,3 +5156,5 @@
 0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
+a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
+954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -10,14 +10,15 @@
 # subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.
 #
-# And, feel free to drop me a line: matthewturk at gmail.com
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
 #
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
 
-if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
     DEST_DIR=${YT_DEST}
 fi
@@ -34,7 +35,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +77,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -97,6 +98,48 @@
 
 LOG_FILE="${DEST_DIR}/yt_install.log"
 
+function write_config
+{
+    CONFIG_FILE=${DEST_DIR}/.yt_config
+
+    echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+    echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
+    echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
+    echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
+    echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
+    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
+    echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
+    echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
+    echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
+    echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
+    echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
+    echo MPL_SUPP_LDFLAGS=${MPL_SUPP_LDFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CFLAGS=${MPL_SUPP_CFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CXXFLAGS=${MPL_SUPP_CXXFLAGS} >> ${CONFIG_FILE}
+    echo MAKE_PROCS=${MAKE_PROCS} >> ${CONFIG_FILE}
+    if [ ${HDF5_DIR} ]
+    then
+        echo ${HDF5_DIR} >> ${CONFIG_FILE}
+    fi
+    if [ ${NUMPY_ARGS} ]
+    then
+        echo ${NUMPY_ARGS} >> ${CONFIG_FILE}
+    fi
+}
+
+# Write config settings to file.
+CONFIG_FILE=${DEST_DIR}/.yt_config
+mkdir -p ${DEST_DIR}
+if [ -z ${REINST_YT} ] || [ ${REINST_YT} -neq 1 ]
+then
+    write_config
+elif [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -f ${CONFIG_FILE} ]
+then
+    USED_CONFIG=1
+    source ${CONFIG_FILE}
+fi
+
 function get_willwont
 {
     if [ $1 -eq 1 ]
@@ -170,6 +213,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
@@ -181,7 +237,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -192,15 +248,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -208,20 +264,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -278,7 +334,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -362,6 +418,10 @@
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
 
+printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
+get_willwont ${INST_ROCKSTAR}
+echo "be installing Rockstar"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -383,6 +443,12 @@
 echo "hit Ctrl-C."
 echo
 host_specific
+if [ ${USED_CONFIG} ]
+then
+    echo "Settings were loaded from ${CONFIG_FILE}."
+    echo "Remove this file if you wish to return to the default settings."
+    echo
+fi
 echo "========================================================================"
 echo
 read -p "[hit enter] "
@@ -424,7 +490,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -486,28 +552,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -515,50 +580,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s|CC=gcc|CC=${CC}|" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -569,11 +634,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,11 +652,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -605,11 +670,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -642,11 +707,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +720,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -674,12 +739,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -696,14 +760,14 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-3.0-hg ] 
+    elif [ ! -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
         # a new webserver, we are now moving to a three-stage process.
         # First we clone the repo, but only up to r0.
-        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-3.0/ ./yt-3.0-hg 2>&1 ) 1>> ${LOG_FILE}
+        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
     elif [ -e yt-3.0-hg ] 
@@ -714,7 +778,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -724,7 +788,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -752,8 +816,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -776,10 +840,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAGS=${OLD_LDFLAGS}
@@ -791,29 +855,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]

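The .sha512 files written above follow the sha512sum convention of one "<hexdigest>  <filename>" line per file. Purely as an illustration of the check these files enable, verifying one of the downloads could be sketched in Python as (filenames hypothetical):

    import hashlib

    def verify_sha512(tarball, checksum_file):
        # Each .sha512 file holds a single "<hexdigest>  <filename>" line.
        expected = open(checksum_file).read().split()[0]
        h = hashlib.sha512()
        with open(tarball, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected

    # e.g. verify_sha512("Cython-0.18.tar.gz", "Cython-0.18.tar.gz.sha512")
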
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,9 @@
 from distutils.core import Command
 from distutils.spawn import find_executable
 
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
 
 class BuildForthon(Command):
 
@@ -41,9 +44,7 @@
     def run(self):
 
         """runner"""
-        Forthon_exe = find_executable("Forthon")
-        gfortran_exe = find_executable("gfortran")
-
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
         if None in (Forthon_exe, gfortran_exe):
             sys.stderr.write(
                 "fKDpy.so won't be built due to missing Forthon/gfortran\n"
@@ -193,9 +194,13 @@
 
 class my_install_data(np_install_data.install_data):
     def run(self):
-        self.distribution.data_files.append(
-            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
-        )
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        # Only ship fKDpy.so when Forthon/gfortran were available to build it.
+        if None not in (Forthon_exe, gfortran_exe):
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
         np_install_data.install_data.run(self)
 
 class my_build_py(build_py):

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -24,6 +24,13 @@
 """
 
 import numpy as np
+from yt.utilities.physical_constants import \
+    charge_proton_cgs, \
+    cm_per_km, \
+    km_per_cm, \
+    mass_electron_cgs, \
+    speed_of_light_cgs
+
 
 def voigt(a,u):
     """
@@ -167,10 +174,10 @@
     """
 
     ## constants
-    me = 1.6726231e-24 / 1836.        # grams mass electron 
-    e = 4.8032e-10                    # esu 
-    c = 2.99792456e5                  # km/s
-    ccgs = c * 1.e5                   # cm/s 
+    me = mass_electron_cgs              # electron mass in g
+    e = charge_proton_cgs               # proton charge in esu
+    c = speed_of_light_cgs * km_per_cm  # speed of light in km/s
+    ccgs = speed_of_light_cgs           # speed of light in cm/s
 
     ## shift lam0 by deltav
     if deltav is not None:
@@ -181,7 +188,7 @@
         lam1 = lam0
 
     ## conversions
-    vdop = vkms * 1.e5                # in cm/s
+    vdop = vkms * cm_per_km           # in cm/s
     lam0cgs = lam0 / 1.e8             # rest wavelength in cm
     lam1cgs = lam1 / 1.e8             # line wavelength in cm
     nu1 = ccgs / lam1cgs              # line freq in Hz

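The constants swapped in above keep the original cgs bookkeeping: wavelengths arrive in Angstroms (hence the division by 1.e8 to reach cm), Doppler parameters in km/s, and line frequencies come out in Hz. A self-contained sketch of the same conversions, with standard cgs values written out rather than imported from yt.utilities.physical_constants (the Lyman-alpha wavelength is just an illustrative input):

    speed_of_light_cgs = 2.99792458e10  # cm/s
    cm_per_km = 1.0e5

    lam0 = 1215.67                      # rest wavelength in Angstroms
    vkms = 30.0                         # Doppler parameter in km/s

    lam0cgs = lam0 / 1.e8               # Angstroms -> cm
    nu0 = speed_of_light_cgs / lam0cgs  # line frequency, ~2.47e15 Hz
    vdop = vkms * cm_per_km             # km/s -> cm/s
    dnu = vdop / lam0cgs                # Doppler width in frequency, Hz
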
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -117,3 +117,6 @@
 from .two_point_functions.api import \
     TwoPointFunctions, \
     FcnSet
+
+from .radmc3d_export.api import \
+    RadMC3DWriter

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -108,6 +108,7 @@
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
 
         self.light_ray_solution = []
+        self.halo_lists = {}
         self._data = {}
 
         # Get list of datasets for light ray solution.
@@ -192,6 +193,7 @@
                        get_los_velocity=False,
                        get_nearest_halo=False,
                        nearest_halo_fields=None,
+                       halo_list_file=None,
                        halo_profiler_parameters=None,
                        njobs=1, dynamic=False):
         """
@@ -229,6 +231,10 @@
             A list of fields to be calculated for the halos nearest to
             every lixel in the ray.
             Default: None.
+        halo_list_file : str
+            Filename containing a list of halo properties to be used 
+            for getting the nearest halos to absorbers.
+            Default: None.
         halo_profiler_parameters: dict
             A dictionary of parameters to be passed to the HaloProfiler
             to create the appropriate data used to get properties for
@@ -287,7 +293,7 @@
         >>> # Make the profiles.
         >>> halo_profiler_actions.append({'function': make_profiles,
         ...                           'args': None,
-        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...                           'kwargs': {'filename': 'VirializedHalos.h5'}})
         ...
         >>> halo_list = 'filtered'
         >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
@@ -305,6 +311,7 @@
         ...                   get_nearest_halo=True,
         ...                   nearest_halo_fields=['TotalMassMsun_100',
         ...                                        'RadiusMpc_100'],
+        ...                   halo_list_file='VirializedHalos.h5',
         ...                   halo_profiler_parameters=halo_profiler_parameters,
         ...                   get_los_velocity=True)
         
@@ -321,17 +328,18 @@
         # Initialize data structures.
         self._data = {}
         if fields is None: fields = []
-        all_fields = [field for field in fields]
+        data_fields = fields[:]
+        all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_nearest_halo:
             all_fields.extend(['x', 'y', 'z', 'nearest_halo'])
             all_fields.extend(['nearest_halo_%s' % field \
                                for field in nearest_halo_fields])
-            fields.extend(['x', 'y', 'z'])
+            data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['x-velocity', 'y-velocity',
                                'z-velocity', 'los_velocity'])
-            fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
+            data_fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
 
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
@@ -348,10 +356,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            if get_nearest_halo:
-                halo_list = self._get_halo_list(my_segment['filename'],
-                                                **halo_profiler_parameters)
-
             # Load dataset for segment.
             pf = load(my_segment['filename'])
 
@@ -373,7 +377,7 @@
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
-                for field in fields:
+                for field in data_fields:
                     sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
@@ -400,6 +404,9 @@
 
             # Calculate distance to nearest object on halo list for each lixel.
             if get_nearest_halo:
+                halo_list = self._get_halo_list(pf, fields=nearest_halo_fields,
+                                                filename=halo_list_file,
+                                                **halo_profiler_parameters)
                 sub_data.update(self._get_nearest_halo_properties(sub_data, halo_list,
                                 fields=nearest_halo_fields))
                 sub_data['nearest_halo'] *= pf.units['mpccm']
@@ -434,58 +441,92 @@
         self._data = all_data
         return all_data
 
-    def _get_halo_list(self, dataset, halo_profiler_kwargs=None,
+    def _get_halo_list(self, pf, fields=None, filename=None, 
+                       halo_profiler_kwargs=None,
                        halo_profiler_actions=None, halo_list='all'):
-        "Load a list of halos for the dataset."
+        "Load a list of halos for the pf."
+
+        if str(pf) in self.halo_lists:
+            return self.halo_lists[str(pf)]
+
+        if fields is None: fields = []
+
+        if filename is not None and \
+                os.path.exists(os.path.join(pf.fullpath, filename)):
+
+            my_filename = os.path.join(pf.fullpath, filename)
+            mylog.info("Loading halo list from %s." % my_filename)
+            my_list = {}
+            in_file = h5py.File(my_filename, 'r')
+            for field in fields + ['center']:
+                my_list[field] = in_file[field][:]
+            in_file.close()
+
+        else:
+            my_list = self._halo_profiler_list(pf, fields=fields,
+                                               halo_profiler_kwargs=halo_profiler_kwargs,
+                                               halo_profiler_actions=halo_profiler_actions,
+                                               halo_list=halo_list)
+
+        self.halo_lists[str(pf)] = my_list
+        return self.halo_lists[str(pf)]
+
+    def _halo_profiler_list(self, pf, fields=None, 
+                            halo_profiler_kwargs=None,
+                            halo_profiler_actions=None, halo_list='all'):
+        "Run the HaloProfiler to get the halo list."
 
         if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
         if halo_profiler_actions is None: halo_profiler_actions = []
 
-        hp = HaloProfiler(dataset, **halo_profiler_kwargs)
+        hp = HaloProfiler(pf, **halo_profiler_kwargs)
         for action in halo_profiler_actions:
             if not action.has_key('args'): action['args'] = ()
             if not action.has_key('kwargs'): action['kwargs'] = {}
             action['function'](hp, *action['args'], **action['kwargs'])
 
         if halo_list == 'all':
-            return_list = copy.deepcopy(hp.all_halos)
+            hp_list = copy.deepcopy(hp.all_halos)
         elif halo_list == 'filtered':
-            return_list = copy.deepcopy(hp.filtered_halos)
+            hp_list = copy.deepcopy(hp.filtered_halos)
         else:
             mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-            return_list = None
+            hp_list = None
 
         del hp
+
+        # Create position array from halo list.
+        return_list = dict([(field, []) for field in fields + ['center']])
+        for halo in hp_list:
+            for field in fields + ['center']:
+                return_list[field].append(halo[field])
+        for field in fields + ['center']:
+            return_list[field] = np.array(return_list[field])
         return return_list
-
+
     def _get_nearest_halo_properties(self, data, halo_list, fields=None):
         """
         Calculate distance to nearest object in halo list for each lixel in data.
-        Return list of distances and masses of nearest objects.
+        Return list of distances and other properties of nearest objects.
         """
 
         if fields is None: fields = []
+        field_data = dict([(field, np.zeros_like(data['x'])) \
+                           for field in fields])
+        nearest_distance = np.zeros_like(data['x'])
 
-        # Create position array from halo list.
-        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
-                                                       halo_list))) \
-                                  for field in fields])
-
-        nearest_distance = np.zeros(data['x'].shape)
-        field_data = dict([(field, np.zeros(data['x'].shape)) \
-                           for field in fields])
-        for index in xrange(nearest_distance.size):
-            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
-                                                            data['y'][index],
-                                                            data['z'][index]]),
-                                                  halo_centers))
-            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
-                                                                  data['y'][index],
-                                                                  data['z'][index]]),
-                                                        halo_centers[nearest])
-            for field in fields:
-                field_data[field][index] = halo_field_values[field][nearest]
+        if halo_list['center'].size > 0:
+            for index in xrange(nearest_distance.size):
+                nearest = np.argmin(periodic_distance(np.array([data['x'][index],
+                                                                data['y'][index],
+                                                                data['z'][index]]),
+                                                      halo_list['center']))
+                nearest_distance[index] = periodic_distance(np.array([data['x'][index],
+                                                                      data['y'][index],
+                                                                      data['z'][index]]),
+                                                            halo_list['center'][nearest])
+                for field in fields:
+                    field_data[field][index] = halo_list[field][nearest]
 
         return_data = {'nearest_halo': nearest_distance}
         for field in fields:

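The halo list used above is now a plain dict of numpy arrays keyed by field name, with 'center' holding an (N, 3) array of positions, so the nearest-halo pass reduces to an argmin over periodic distances for each lixel. A minimal sketch of that lookup, with a local minimum-image helper standing in for yt's periodic_distance and a unit box assumed:

    import numpy as np

    def periodic_dist(point, centers, width=1.0):
        # Minimum-image distance in a periodic box of the given width.
        d = np.abs(centers - point)
        d = np.minimum(d, width - d)
        return np.sqrt((d ** 2).sum(axis=-1))

    halo_list = {"center": np.random.random((100, 3)),
                 "TotalMassMsun_100": 10 ** (11 + 3 * np.random.random(100))}

    point = np.array([0.5, 0.5, 0.5])
    dists = periodic_dist(point, halo_list["center"])
    nearest = np.argmin(dists)
    nearest_distance = dists[nearest]
    nearest_mass = halo_list["TotalMassMsun_100"][nearest]
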
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -45,7 +45,10 @@
 from yt.utilities.performance_counters import \
     yt_counters, time_function
 from yt.utilities.math_utils import periodic_dist, get_rotation_matrix
-from yt.utilities.physical_constants import rho_crit_now, mass_sun_cgs
+from yt.utilities.physical_constants import \
+    rho_crit_now, \
+    mass_sun_cgs, \
+    TINY
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
@@ -60,8 +63,6 @@
     ParallelAnalysisInterface, \
     parallel_blocking_call
 
-TINY = 1.e-40
-
 class Halo(object):
     """
     A data source that returns particle information about the members of a
@@ -143,10 +144,10 @@
             return self.CoM
         pm = self["ParticleMassMsun"]
         c = {}
-        c[0] = self["particle_position_x"]
-        c[1] = self["particle_position_y"]
-        c[2] = self["particle_position_z"]
-        c_vec = np.zeros(3)
+        # We shift into a box where the origin is the left edge
+        c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
+        c[1] = self["particle_position_y"] - self.pf.domain_left_edge[1]
+        c[2] = self["particle_position_z"] - self.pf.domain_left_edge[2]
         com = []
         for i in range(3):
             # A halo is likely periodic around a boundary if the distance 
@@ -159,13 +160,12 @@
                 com.append(c[i])
                 continue
             # Now we want to flip around only those close to the left boundary.
-            d_left = c[i] - self.pf.domain_left_edge[i]
-            sel = (d_left <= (self.pf.domain_width[i]/2))
+            sel = (c[i] <= (self.pf.domain_width[i]/2))
             c[i][sel] += self.pf.domain_width[i]
             com.append(c[i])
         com = np.array(com)
         c = (com * pm).sum(axis=1) / pm.sum()
-        return c%self.pf.domain_width
+        return c%self.pf.domain_width + self.pf.domain_left_edge
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -1062,7 +1062,7 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
+        *dm_only* is True (default), only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
         self._data_source = data_source
@@ -1097,7 +1097,7 @@
     def _get_dm_indices(self):
         if 'creation_time' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on creation time")
-            return (self._data_source["creation_time"] < 0)
+            return (self._data_source["creation_time"] <= 0)
         elif 'particle_type' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
@@ -1428,7 +1429,7 @@
         fglob = path.join(basedir, 'halos_%d.*.bin' % n)
         files = glob.glob(fglob)
         halos = self._get_halos_binary(files)
-        #Jc = 1.98892e33/pf['mpchcm']*1e5
+        # Jc = mass_sun_cgs / pf['mpchcm'] * 1e5
         Jc = 1.0
         length = 1.0 / pf['mpchcm']
         conv = dict(pos = np.array([length, length, length,
@@ -1457,7 +1458,7 @@
 class HOPHaloList(HaloList):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "HOP"
@@ -1656,7 +1657,7 @@
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "parallelHOP"
@@ -2008,13 +2009,11 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
-
+        ensure_dir_exists(filename)
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
 
+
     def write_particle_lists_txt(self, prefix):
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file.
@@ -2031,13 +2030,11 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
+
     @parallel_blocking_call
     def write_particle_lists(self, prefix):
         r"""Write out the particle data for halos to HDF5 files.
@@ -2058,10 +2055,7 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
@@ -2090,15 +2084,12 @@
         ellipsoid_data : bool.
             Whether to save the ellipsoidal information to the files.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.dump("MyHalos")
         """
-        # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
-
+        ensure_dir_exists(basename)
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
@@ -2131,7 +2122,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     resize : bool
         Turns load-balancing on or off. Default = True.
     kdtree : string
@@ -2460,7 +2451,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number
@@ -2565,7 +2556,7 @@
         applied.  Default = 0.2.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number

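The center-of-mass change in this diff shifts particle positions into a frame whose origin is the domain's left edge, flips the particles near the left boundary across it when the halo appears to straddle the edge (max - min spread larger than half the width), takes the mass-weighted mean, and wraps back into the domain. A one-axis numeric sketch of those steps:

    import numpy as np

    left_edge, width = 0.0, 1.0
    x = np.array([0.02, 0.97, 0.99]) - left_edge   # halo straddling the boundary
    m = np.ones_like(x)                            # equal particle masses

    if x.max() - x.min() > width / 2:              # likely wrapped around the edge
        x[x <= width / 2] += width                 # flip the left-side particles
    com = (x * m).sum() / m.sum() % width + left_edge
    # com ~ 0.993; the naive mean would give 0.66, far from every particle
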
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True
@@ -337,6 +338,8 @@
                     hires_only = (self.hires_dm_mass is not None),
                     **kwargs)
         # Make the directory to store the halo lists in.
+        if not self.outbase:
+            self.outbase = os.getcwd()
         if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
                 os.makedirs(self.outbase)

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -31,6 +31,11 @@
     ParallelDummy, \
     ParallelAnalysisInterface, \
     parallel_blocking_call
+from yt.utilities.physical_constants import \
+    cm_per_mpc, \
+    mass_sun_cgs, \
+    rho_crit_now
+
 
 class HaloMassFcn(ParallelAnalysisInterface):
     """
@@ -212,7 +217,7 @@
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
             if i == (self.num_sigma_bins - 3): break
 
-        self.dis = dis  / self.pf['CosmologyComovingBoxSize']**3.0 * self.hubble0**3.0
+        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
 
     def sigmaM(self):
         """
@@ -259,7 +264,9 @@
         sigma8_unnorm = math.sqrt(self.sigma_squared_of_R(R));
         sigma_normalization = self.sigma8input / sigma8_unnorm;
 
-        rho0 = self.omega_matter0 * 2.78e+11; # in units of h^2 Msolar/Mpc^3
+        # rho0 in units of h^2 Msolar/Mpc^3
+        rho0 = self.omega_matter0 * \
+                rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
 
         # spacing in mass of our sigma calculation
         dm = (float(self.log_mass_max) - self.log_mass_min)/self.num_sigma_bins;
@@ -294,7 +301,9 @@
     def dndm(self):
         
         # constants - set these before calling any functions!
-        rho0 = self.omega_matter0 * 2.78e+11; # in units of h^2 Msolar/Mpc^3
+        # rho0 in units of h^2 Msolar/Mpc^3
+        rho0 = self.omega_matter0 * \
+                rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
         
         nofmz_cum = 0.0;  # keep track of cumulative number density

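The hard-coded 2.78e+11 being replaced is recoverable from the constants themselves: with rho_crit_now = 1.8788e-29 h^2 g/cm^3 (the value appearing later in these diffs), cm_per_mpc ~ 3.0857e24 and mass_sun_cgs ~ 1.989e33 g, the combination lands on the same h^2 Msolar/Mpc^3 figure. A quick check:

    rho_crit_now = 1.8788e-29  # g/cm^3, times h^2
    cm_per_mpc = 3.0857e24     # cm per megaparsec
    mass_sun_cgs = 1.989e33    # g

    rho0_factor = rho_crit_now * cm_per_mpc ** 3 / mass_sun_cgs
    print(rho0_factor)         # ~2.78e11, the old magic number
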
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -27,6 +27,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.utilities.physical_constants import TINY
 
 def VirialFilter(profile, overdensity_field='ActualOverdensity',
                  virial_overdensity=200., must_be_virialized=True,
@@ -105,7 +106,8 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = np.log10(temp_profile[field])
+            temp_profile[field] = np.log10(np.clip(temp_profile[field], TINY,
+                                                   max(temp_profile[field])))
 
     virial = dict((field, 0.0) for field in fields)
 

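Clipping to TINY (1.e-40, per the constant relocated to yt.utilities.physical_constants in the halo_objects diff above) before taking the log keeps zero-valued profile bins from becoming -inf and derailing the virial interpolation that follows. The pattern in isolation:

    import numpy as np

    TINY = 1.e-40
    field = np.array([0.0, 1.0e-12, 3.5])
    safe = np.log10(np.clip(field, TINY, field.max()))
    # [-40., -12., 0.544...] rather than [-inf, -12., 0.544...]
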
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import gc
 import numpy as np
 import os
 import h5py
@@ -52,6 +53,9 @@
     parallel_blocking_call, \
     parallel_root_only, \
     parallel_objects
+from yt.utilities.physical_constants import \
+    mass_sun_cgs, \
+    rho_crit_now
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
 from yt.visualization.image_writer import write_image
@@ -583,7 +587,7 @@
 
             r_min = 2 * self.pf.h.get_smallest_dx() * self.pf['mpc']
             if (halo['r_max'] / r_min < PROFILE_RADIUS_THRESHOLD):
-                mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
+                mylog.debug("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
                 return None
 
             # get a sphere object to profile
@@ -629,6 +633,10 @@
                 g.clear_data()
             sphere.clear_data()
             del sphere
+            # Currently, this seems to be the only way to prevent large
+            # halo profiling runs from running out of RAM.
+            # It would be good to track down the real cause at some point.
+            gc.collect()
 
         return profile
 
@@ -951,12 +959,11 @@
         if 'ActualOverdensity' in profile.keys():
             return
 
-        rho_crit_now = 1.8788e-29 * self.pf.hubble_constant**2 # g cm^-3
-        Msun2g = 1.989e33
-        rho_crit = rho_crit_now * ((1.0 + self.pf.current_redshift)**3.0)
+        rhocritnow = rho_crit_now * self.pf.hubble_constant**2 # g cm^-3
+        rho_crit = rhocritnow * ((1.0 + self.pf.current_redshift)**3.0)
         if not self.use_critical_density: rho_crit *= self.pf.omega_matter
 
-        profile['ActualOverdensity'] = (Msun2g * profile['TotalMassMsun']) / \
+        profile['ActualOverdensity'] = (mass_sun_cgs * profile['TotalMassMsun']) / \
             profile['CellVolume'] / rho_crit
 
     def _check_for_needed_profile_fields(self):

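With the constants imported, the overdensity computed above is mass_sun_cgs * TotalMassMsun / CellVolume / rho_crit, where rho_crit rescales the present-day critical density by h^2 and (1 + z)^3 and is additionally multiplied by omega_matter when measuring against the mean matter density instead. A sketch with illustrative numbers:

    mass_sun_cgs = 1.989e33          # g
    rho_crit_now = 1.8788e-29        # g/cm^3, times h^2
    h, z = 0.7, 0.0                  # illustrative cosmology

    rho_crit = rho_crit_now * h ** 2 * (1.0 + z) ** 3
    total_mass_msun = 1.0e14         # enclosed mass in Msun
    cell_volume = 1.0e73             # enclosed volume in cm^3

    overdensity = mass_sun_cgs * total_mass_msun / cell_volume / rho_crit
    # ~2.2e3 here; multiply rho_crit by omega_matter first to compare
    # against the mean matter density rather than the critical density
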
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -67,8 +67,10 @@
         self.prof = prof
 
     def plot_everything(self, dirname = None):
-        if dirname is None: dirname = "%s_profile_plots/" % (self.pf)
-        if not os.path.isdir(dirname): os.makedirs(dirname)
+        if not dirname:
+            dirname = "%s_profile_plots/" % (self.pf)
+        if not os.path.isdir(dirname):
+            os.makedirs(dirname)
         import matplotlib; matplotlib.use("Agg")
         import pylab
         for field in self.prof.keys():

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -0,0 +1,334 @@
+"""
+Code to export from yt to RadMC3D
+
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from yt.mods import *
+from yt.utilities.lib.write_array import \
+    write_3D_array, write_3D_vector_array
+
+class RadMC3DLayer:
+    '''
+
+    This class represents an AMR "layer" of the style described in
+    the radmc3d manual. Unlike yt grids, layers may not have more
+    than one parent, so level L grids will need to be split up
+    if they straddle two or more level L - 1 grids. 
+
+    '''
+    def __init__(self, level, parent, unique_id, LE, RE, dim):
+        self.level = level
+        self.parent = parent
+        self.LeftEdge = LE
+        self.RightEdge = RE
+        self.ActiveDimensions = dim
+        self.id = unique_id
+
+    def get_overlap_with(self, grid):
+        '''
+
+        Returns the overlapping region between two Layers,
+        or a layer and a grid. RE < LE in any direction
+        means no overlap.
+
+        '''
+        LE = np.maximum(self.LeftEdge,  grid.LeftEdge)
+        RE = np.minimum(self.RightEdge, grid.RightEdge)
+        return LE, RE
+
+    def overlaps(self, grid):
+        '''
+
+        Returns whether or not this layer overlaps a given grid
+        
+        '''
+        LE, RE = self.get_overlap_with(grid)
+        if np.any(RE <= LE):
+            return False
+        else:
+            return True
+
+class RadMC3DWriter:
+    '''
+
+    This class provides a mechanism for writing out data files in a format
+    readable by radmc3d. Currently, only the ASCII, "Layer" style file format
+    is supported. For more information please see the radmc3d manual at:
+    http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d
+
+    Parameters
+    ----------
+
+    pf : `StaticOutput`
+        This is the parameter file object corresponding to the
+        simulation output to be written out.
+
+    max_level : int
+        An int corresponding to the maximum number of levels of refinement
+        to include in the output. Often, this does not need to be very large
+        as information on very high levels is frequently unobservable.
+        Default = 2. 
+
+    Examples
+    --------
+
+    This will create a field called "DustDensity" and write it out to the
+    file "dust_density.inp" in a form readable by radmc3d. It will also write
+    a "dust_temperature.inp" file with everything set to 10.0 K: 
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> dust_to_gas = 0.01
+    >>> def _DustDensity(field, data):
+    ...     return dust_to_gas*data["Density"]
+    >>> add_field("DustDensity", function=_DustDensity)
+
+    >>> def _DustTemperature(field, data):
+    ...     return 10.0*data["Ones"]
+    >>> add_field("DustTemperature", function=_DustTemperature)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_dust_file("DustDensity", "dust_density.inp")
+    >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
+
+    This will create a field called "NumberDensityCO" and write it out to
+    the file "numberdens_co.inp". It will also write out information about
+    the gas velocity to "gas_velocity.inp" so that this broadening may be
+    included in the radiative transfer calculation by radmc3d:
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> x_co = 1.0e-4
+    >>> mu_h = 2.34e-24
+    >>> def _NumberDensityCO(field, data):
+    ...     return (x_co/mu_h)*data["Density"]
+    >>> add_field("NumberDensityCO", function=_NumberDensityCO)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
+    >>> velocity_fields = ["x-velocity", "y-velocity", "z-velocity"]
+    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") 
+
+    '''
+
+    def __init__(self, pf, max_level=2):
+        self.max_level = max_level
+        self.cell_count = 0 
+        self.layers = []
+        self.domain_dimensions = pf.domain_dimensions
+        self.domain_left_edge  = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.grid_filename = "amr_grid.inp"
+        self.pf = pf
+
+        base_layer = RadMC3DLayer(0, None, 0, \
+                                  self.domain_left_edge, \
+                                  self.domain_right_edge, \
+                                  self.domain_dimensions)
+
+        self.layers.append(base_layer)
+        self.cell_count += np.product(pf.domain_dimensions)
+
+        for grid in pf.h.grids:
+            if grid.Level <= self.max_level:
+                self._add_grid_to_layers(grid)
+
+    def _get_parents(self, grid):
+        parents = []  
+        for potential_parent in self.layers:
+            if potential_parent.level == grid.Level - 1:
+                if potential_parent.overlaps(grid):
+                    parents.append(potential_parent)
+        return parents
+
+    def _add_grid_to_layers(self, grid):
+        parents = self._get_parents(grid)
+        for parent in parents:
+            LE, RE = parent.get_overlap_with(grid)
+            N = (RE - LE) / grid.dds
+            N = np.array([int(n + 0.5) for n in N])
+            new_layer = RadMC3DLayer(grid.Level, parent.id, \
+                                     len(self.layers), \
+                                     LE, RE, N)
+            self.layers.append(new_layer)
+            self.cell_count += np.product(N)
+            
+    def write_amr_grid(self):
+        '''
+        This routine writes the "amr_grid.inp" file that describes the mesh
+        radmc3d will use.
+
+        '''
+        dims = self.domain_dimensions
+        LE   = self.domain_left_edge
+        RE   = self.domain_right_edge
+
+        # calculate cell wall positions
+        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+
+        # write the file header
+        grid_file = open(self.grid_filename, 'w')
+        grid_file.write('1 \n') # iformat is always 1
+        if self.max_level == 0:
+            grid_file.write('0 \n')
+        else:
+            grid_file.write('10 \n') # only layer-style AMR files are supported
+        grid_file.write('1 \n') # only cartesian coordinates are supported
+        grid_file.write('0 \n') 
+        grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
+        grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
+        if self.max_level != 0:
+            s = str(self.max_level) + '    ' + str(len(self.layers)-1) + '\n'
+            grid_file.write(s)
+
+        # write base grid cell wall positions
+        for x in xs:
+            grid_file.write(x + '    ')
+        grid_file.write('\n')
+
+        for y in ys:
+            grid_file.write(y + '    ')
+        grid_file.write('\n')
+
+        for z in zs:
+            grid_file.write(z + '    ')
+        grid_file.write('\n')
+
+        # write information about fine layers, skipping the base layer:
+        for layer in self.layers[1:]:
+            p = layer.parent
+            dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions)
+            if p == 0:
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            else:
+                # look up the parent layer's left edge under a separate
+                # name, so LE keeps holding the domain left edge for any
+                # later layers whose parent is the base grid
+                parent_LE = np.zeros(3)
+                for potential_parent in self.layers:
+                    if potential_parent.id == p:
+                        parent_LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
+            ix  = int(ind[0]+0.5)
+            iy  = int(ind[1]+0.5)
+            iz  = int(ind[2]+0.5)
+            nx, ny, nz = layer.ActiveDimensions / 2
+            s = '{}    {}    {}    {}    {}    {}    {} \n'
+            s = s.format(p, ix, iy, iz, nx, ny, nz)
+            grid_file.write(s)
+
+        grid_file.close()
+
+    def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
+        cg = self.pf.h.covering_grid(level, LE, dim, num_ghost_zones=1)
+        if isinstance(field, list):
+            data_x = cg[field[0]]
+            data_y = cg[field[1]]
+            data_z = cg[field[2]]
+            write_3D_vector_array(data_x, data_y, data_z, fhandle)
+        else:
+            data = cg[field]
+            write_3D_array(data, fhandle)
+
+    def write_dust_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        thermal dust emission. In particular, if you have a field called
+        "DustDensity", you can write out a dust_density.inp file.
+
+        Parameters
+        ----------
+
+        field : string
+            The name of the field to be written out
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+        fhandle.write('1 \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+            
+        fhandle.close()
+
+    def write_line_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        line emission.
+
+        Parameters
+        ----------
+
+        field : string or list of 3 strings
+            If a string, the name of the field to be written out. If a list,
+            three fields that will be written to the file as a vector quantity.
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+
+        fhandle.close()
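
A companion sketch to the line-emission example in the class docstring,
showing the dust-emission path through write_dust_file(); the
"DustDensity" field and the constant dust-to-gas ratio below are
illustrative assumptions, not part of this changeset:

    >>> from yt.mods import *
    >>> from yt.analysis_modules.radmc3d_export.api import *

    >>> dust_to_gas = 0.01  # assumed constant dust-to-gas mass ratio
    >>> def _DustDensity(field, data):
    ...     return dust_to_gas * data["Density"]
    >>> add_field("DustDensity", function=_DustDensity)

    >>> pf = load("galaxy0030/galaxy0030")
    >>> writer = RadMC3DWriter(pf)

    >>> writer.write_amr_grid()
    >>> writer.write_dust_file("DustDensity", "dust_density.inp")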

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/radmc3d_export/api.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/api.py
@@ -0,0 +1,30 @@
+"""
+API for RadMC3D Export code
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .RadMC3DInterface import \
+    RadMC3DWriter

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -20,4 +20,5 @@
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
+    config.add_subpackage("radmc3d_export")
     return config

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -37,6 +37,9 @@
 from yt.utilities.exceptions import YTException
 from yt.utilities.linear_interpolators import \
     BilinearFieldInterpolator
+from yt.utilities.physical_constants import \
+    erg_per_eV, \
+    keV_per_Hz
 
 xray_data_version = 1
 
@@ -101,7 +104,7 @@
                   np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
                                                [self.log_E[-1] - 0.5 * E_diff[-1],
                                                 self.log_E[-1] + 0.5 * E_diff[-1]]]))
-        self.dnu = 2.41799e17 * np.diff(self.E_bins)
+        self.dnu = keV_per_Hz * np.diff(self.E_bins)
 
     def _get_interpolator(self, data, e_min, e_max):
         r"""Create an interpolator for total emissivity in a 
@@ -311,7 +314,7 @@
     """
 
     my_si = EmissivityIntegrator(filename=filename)
-    energy_erg = np.power(10, my_si.log_E) * 1.60217646e-9
+    energy_erg = np.power(10, my_si.log_E) * erg_per_eV
 
     em_0 = my_si._get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
                                    e_min, e_max)

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -31,9 +31,13 @@
 from yt.utilities.cosmology import \
     Cosmology, \
     EnzoCosmology
+from yt.utilities.physical_constants import \
+    sec_per_year, \
+    speed_of_light_cgs
 
-YEAR = 3.155693e7 # sec / year
-LIGHT = 2.997925e10 # cm / s
+
+YEAR = sec_per_year # sec / year
+LIGHT = speed_of_light_cgs # cm / s
 
 class StarFormationRate(object):
     r"""Calculates the star formation rate for a given population of

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -35,6 +35,9 @@
 import numpy as np
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
+from yt.utilities.physical_constants import \
+    kpc_per_cm, \
+    sec_per_year
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
@@ -524,7 +527,7 @@
                         for ax in 'xyz']).transpose()
         # Velocity is cm/s, we want it to be kpc/yr
         #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
-        vel *= 1.02268944e-14 
+        vel *= kpc_per_cm * sec_per_year
     if initial_mass is None:
         #in solar masses
         initial_mass = dd["particle_mass_initial"][idx]*pf['Msun']

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,9 +62,10 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold008',
     local_standard_filename = 'local001',
-    sketchfab_api_key = 'None'
+    sketchfab_api_key = 'None',
+    thread_field_detection = 'False'
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/fd0cc68d6934/
Changeset:   fd0cc68d6934
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:02:32
Summary:     Fixed dict init
Affected #:  1 file

diff -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 -r fd0cc68d6934e930985cac2128ca60d061f7db42 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -42,10 +42,15 @@
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
     tb, ages = None, None
-    cache = {}
-    masks = {}
+    cache = None
+    masks = None
     caching = False
 
+    def __init__(self):
+        self.cache = {}
+        self.masks = {}
+        super(IOHandlerART, self).__init__()
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
         # Each domain subset will contain a hydro_offset array, which gives
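
The fix above targets the classic mutable-class-attribute pitfall: a
dict created in the class body is shared by every instance of the
class, so per-instance caches must be created in __init__. A minimal
standalone sketch of the failure mode:

    class Handler(object):
        cache = {}            # class attribute: one dict shared by all instances

        def __init__(self):
            self.masks = {}   # instance attribute: a fresh dict per instance

    a, b = Handler(), Handler()
    a.cache["k"] = 1
    a.masks["k"] = 1
    print(b.cache)  # {'k': 1} -- state leaked from a into b
    print(b.masks)  # {}       -- isolated, as intended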


https://bitbucket.org/yt_analysis/yt-3.0/commits/87069c899fe6/
Changeset:   87069c899fe6
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:29:43
Summary:     Switched to standard particle deposit fields
Affected #:  1 file

diff -r fd0cc68d6934e930985cac2128ca60d061f7db42 -r 87069c899fe61f86661a5d78813e8fbc95b1d943 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -42,6 +42,10 @@
 from yt.utilities.physical_constants import mass_sun_cgs
 from yt.frontends.art.definitions import *
 
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
+
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
@@ -264,115 +268,12 @@
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
 
-# Modeled after the TIPSY / Gadget frontend particle deposit fields
-def _particle_functions(ptype, pname):
-    mass_name = "particle_mass"
-    def particle_pos(data, axes="xyz"):
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]\
-                                    for ax in axes])
-        if len(axes)==1:
-            return pos[0]
-        return pos
+# Particle Deposition Fields
+_ptypes = ["all", "darkmatter", "stars", "specie0"]
 
-    def particle_vel(data, axes="xyz"):
-        pos = np.column_stack([data[(ptype, "particle_velocity_%s" % ax)]\
-                                    for ax in axes])
-        if len(axes)==1:
-            return pos[0]
-        return pos
-
-    def particle_count(field, data):
-        pos = particle_pos(data)
-        d = data.deposit(pos, method = "count")
-        return d
-    
-    add_field("deposit_%s_count" % ptype,
-             function = particle_count,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % pname,
-             projection_conversion = '1')
-
-    def particle_mass(field, data):
-        pos = particle_pos(data)
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        return d
-
-    add_field("deposit_%s_mass" % ptype,
-             function = particle_mass,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % pname,
-             units = r"\mathrm{g}",
-             projected_units = r"\mathrm{g}\/\mathrm{cm}",
-             projection_conversion = 'cm')
-
-    def particle_density(field, data):
-        pos = particle_pos(data)
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        d /= data["CellVolume"]
-        return d
-
-    add_field("deposit_%s_density" % ptype,
-             function = particle_density,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Density}" % pname,
-             units = r"\mathrm{g}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{g}/\mathrm{cm}^{-2}",
-             projection_conversion = 'cm')
-
-    def particle_number_density(field, data):
-        pos = particle_pos(data)
-        d = data.deposit(pos, method = "count")
-        d /= data["CellVolume"]
-        return d
-
-    add_field("deposit_%s_number_density" % ptype,
-             function = particle_density,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Number Density}" % pname,
-             units = r"\mathrm{1}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{1}/\mathrm{cm}^{-2}",
-             projection_conversion = 'cm')
-
-
-    for ax in "xyz":
-        def particle_mass_velocity(field, data, ax):
-            pos = particle_pos(data)
-            vel = particle_vel(data, ax) 
-            mass = data[ptype, mass_name]
-            d = data.deposit(pos, [vel, mass], method = "weighted_mean")
-            d[~np.isfinite(d)] = 0.0
-            return d
-
-        add_field("deposit_%s_weighted_velocity_%s" % (ptype, ax),
-                 function = lambda f, d: particle_mass_velocity(f, d, ax),
-                 validators = [ValidateSpatial()],
-                 display_name = "\\mathrm{%s Mass Weighted Velocity %s}" % \
-                                (pname, ax.upper()),
-                 units = r"\mathrm{\mathrm{cm}/\mathrm{s}}",
-                 projected_units = r"\mathrm{\mathrm{cm}/\mathrm{s}}",
-                 projection_conversion = '1',
-                 take_log=False)
-
-    add_field((ptype, "ParticleMass"),
-            function = TranslationFunc((ptype, mass_name)),
-            particle_type = True,
-            units = r"\mathrm{g}")
-
-    def _ParticleMassMsun(field, data):
-        return data[ptype, mass_name].copy()
-    def _conv_Msun(data):
-        return 1.0/mass_sun_cgs
-
-    add_field((ptype, "ParticleMassMsun"),
-            function = _ParticleMassMsun,
-            convert_function = _conv_Msun,
-            particle_type = True,
-            units = r"\mathrm{M}_\odot")
-
-# Particle Deposition Fields
-_ptypes = ["all", "darkmatter", "stars"]
-_pnames  = ["Particle", "Dark Matter", "Stellar"]
-
-for _ptype, _pname in zip(_ptypes, _pnames):
-    _particle_functions(_ptype, _pname)
-
+for _ptype in _ptypes:
+    particle_vector_functions(_ptype, ["particle_position_%s" % ax for ax in 'xyz'],
+                                     ["particle_velocity_%s" % ax for ax in 'xyz'],
+                              ARTFieldInfo)
+    particle_deposition_functions(_ptype, "Coordinates", "particle_mass",
+                                   ARTFieldInfo)
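
Once registered, the standard deposit fields are addressed through the
("deposit", ...) field-type tuple. A hypothetical usage sketch (the
dataset path is a placeholder, not from this changeset):

    from yt.mods import *

    pf = load("my_art_output.d")  # placeholder ART dataset
    dd = pf.h.all_data()
    rho_stars = dd["deposit", "stars_density"]     # g/cm^3, "sum" deposit / cell volume
    n_dm      = dd["deposit", "darkmatter_count"]  # particles per cell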


https://bitbucket.org/yt_analysis/yt-3.0/commits/e68df96c341a/
Changeset:   e68df96c341a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:36:46
Summary:     Made the field list more explicit about gas fields
Affected #:  2 files

diff -r 87069c899fe61f86661a5d78813e8fbc95b1d943 -r e68df96c341a0a5f2526c57678121c816ceb8cac yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -46,6 +46,7 @@
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
         return d
+
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],

diff -r 87069c899fe61f86661a5d78813e8fbc95b1d943 -r e68df96c341a0a5f2526c57678121c816ceb8cac yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -124,9 +124,9 @@
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
-        self.field_list = set(fluid_fields + particle_fields +
-                              particle_star_fields)
-        self.field_list = list(self.field_list)
+        self.field_list = [("gas", f) for f in fluid_fields]
+        self.field_list += set(particle_fields + particle_star_fields \
+                               + fluid_fields)
         # now generate all of the possible particle fields
         if "wspecies" in self.parameter_file.parameters.keys():
             wspecies = self.parameter_file.parameters['wspecies']


https://bitbucket.org/yt_analysis/yt-3.0/commits/d2eb763a76c3/
Changeset:   d2eb763a76c3
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:49:49
Summary:     Changed particle_mass to have projected units of g. particle_mass_width has units of g-cm.
Affected #:  1 file

diff -r e68df96c341a0a5f2526c57678121c816ceb8cac -r d2eb763a76c30e0e97f835143272048dc5e3d6c9 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -56,13 +56,28 @@
     def particle_mass(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        return d
+        dx = data["dx"]
+        return d / dx
 
     registry.add_field(("deposit", "%s_mass" % ptype),
              function = particle_mass,
              validators = [ValidateSpatial()],
              display_name = "\\mathrm{%s Mass}" % ptype,
              units = r"\mathrm{g}",
+             projected_units = r"\mathrm{g}",
+             projection_conversion = '1')
+
+
+    def particle_mass_width(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        return d
+
+    registry.add_field(("deposit", "%s_mass_width" % ptype),
+             function = particle_mass_width,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Mass}" % ptype,
+             units = r"\mathrm{g}",
              projected_units = r"\mathrm{g}\/\mathrm{cm}",
              projection_conversion = 'cm')
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/d2832a1a86d3/
Changeset:   d2832a1a86d3
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 18:50:17
Summary:     Reversing the confused particle mass changes I made
Affected #:  1 file

diff -r d2eb763a76c30e0e97f835143272048dc5e3d6c9 -r d2832a1a86d3b54e5af5c8c3639cd45726066347 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -56,28 +56,13 @@
     def particle_mass(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        dx = data["dx"]
-        return d / dx
+        return d
 
     registry.add_field(("deposit", "%s_mass" % ptype),
              function = particle_mass,
              validators = [ValidateSpatial()],
              display_name = "\\mathrm{%s Mass}" % ptype,
              units = r"\mathrm{g}",
-             projected_units = r"\mathrm{g}",
-             projection_conversion = '1')
-
-
-    def particle_mass_width(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        return d
-
-    registry.add_field(("deposit", "%s_mass_width" % ptype),
-             function = particle_mass_width,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % ptype,
-             units = r"\mathrm{g}",
              projected_units = r"\mathrm{g}\/\mathrm{cm}",
              projection_conversion = 'cm')
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/352fa26ed2ed/
Changeset:   352fa26ed2ed
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 19:22:15
Summary:     added mixed fluid-particle fields
Affected #:  1 file

diff -r d2832a1a86d3b54e5af5c8c3639cd45726066347 -r 352fa26ed2edf1052b9faa7dd2a37d99186a1e1a yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -277,3 +277,60 @@
                               ARTFieldInfo)
     particle_deposition_functions(_ptype, "Coordinates", "particle_mass",
                                    ARTFieldInfo)
+
+# Mixed Fluid-Particle Fields
+
+def baryon_density(field, data):
+    pos = np.column_stack([data["stars", "particle_position_%s" % ax]
+        for ax in 'xyz'])
+    pmass = data["stars", "particle_mass"]
+    mass  = data.deposit(pos, [pmass], method = "sum")
+    mass += data["gas", "CellMass"]
+    vol   = data["CellVolume"]
+    return mass / vol
+
+ARTFieldInfo.add_field(("deposit", "baryon_density"),
+         function = baryon_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Baryon Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+def total_density(field, data):
+    ptype = 'specie0'
+    rho = data["deposit", "baryon_density"]
+    pos = np.column_stack([data[ptype, "particle_position_%s" % ax]
+                           for ax in 'xyz'])
+    pmas = data[ptype, "particle_mass"]
+    mass = data.deposit(pos, [pmas], method = "sum")
+    vol  = data["gas", "CellVolume"]
+    return rho + (mass / vol)
+
+ARTFieldInfo.add_field(("deposit", "total_density"),
+         function = total_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Total Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+
+def multimass_density(field, data):
+    ptype = 'darkmatter'
+    rho = data["deposit", "baryon_density"]
+    pos = np.column_stack([data[ptype, "particle_position_%s" % ax]
+                           for ax in 'xyz'])
+    pmas = data[ptype, "particle_mass"]
+    mass = data.deposit(pos, [pmas], method = "sum")
+    vol   = data["gas", "CellVolume"]
+    return rho + mass / vol
+
+ARTFieldInfo.add_field(("deposit", "multimass_density"),
+         function = total_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Multimass Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
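
A hypothetical sketch of consuming the mixed fields once registered
(placeholder dataset path; assumes tuple-named fields are plottable at
this stage of the 3.0 refactor):

    from yt.mods import *

    pf = load("my_art_output.d")  # placeholder ART dataset
    p = ProjectionPlot(pf, "z", ("deposit", "baryon_density"))
    p.save()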


https://bitbucket.org/yt_analysis/yt-3.0/commits/869e903e60c4/
Changeset:   869e903e60c4
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 19:22:35
Summary:     making particle_density derived from particle_mass
Affected #:  1 file

diff -r 352fa26ed2edf1052b9faa7dd2a37d99186a1e1a -r 869e903e60c40b9e6939e94d19ddcbf37555f7d8 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -67,8 +67,7 @@
              projection_conversion = 'cm')
 
     def particle_density(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d = data["deposit", "%s_mass" % ptype]
         d /= data["CellVolume"]
         return d
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/5f66b8559208/
Changeset:   5f66b8559208
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 21:36:38
Summary:     making Density fields explicit in field type
Affected #:  2 files

diff -r 869e903e60c40b9e6939e94d19ddcbf37555f7d8 -r 5f66b8559208fd3aa2f3d15892ec0da91c39b589 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -67,7 +67,8 @@
              projection_conversion = 'cm')
 
     def particle_density(field, data):
-        d = data["deposit", "%s_mass" % ptype]
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
         d /= data["CellVolume"]
         return d
 

diff -r 869e903e60c40b9e6939e94d19ddcbf37555f7d8 -r 5f66b8559208fd3aa2f3d15892ec0da91c39b589 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -286,7 +286,7 @@
     pmass = data["stars", "particle_mass"]
     mass  = data.deposit(pos, [pmass], method = "sum")
     mass += data["gas", "CellMass"]
-    vol   = data["CellVolume"]
+    vol   = data["gas", "CellVolume"]
     return mass / vol
 
 ARTFieldInfo.add_field(("deposit", "baryon_density"),
@@ -323,11 +323,11 @@
                            for ax in 'xyz'])
     pmas = data[ptype, "particle_mass"]
     mass = data.deposit(pos, [pmas], method = "sum")
-    vol   = data["gas", "CellVolume"]
+    vol   = data["gas","CellVolume"]
     return rho + mass / vol
 
 ARTFieldInfo.add_field(("deposit", "multimass_density"),
-         function = total_density,
+         function = multimass_density,
          validators = [ValidateSpatial()],
          display_name = "\\mathrm{Multimass Density}",
          units = r"\mathrm{g}/\mathrm{cm}^{3}",


https://bitbucket.org/yt_analysis/yt-3.0/commits/ba3e6dc5c90b/
Changeset:   ba3e6dc5c90b
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 21:52:46
Summary:     mixed densities now derive from individual field type densities
Affected #:  1 file

diff -r 5f66b8559208fd3aa2f3d15892ec0da91c39b589 -r ba3e6dc5c90bbcf47762f4c31da4ac0d7787c385 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -281,13 +281,9 @@
 # Mixed Fluid-Particle Fields
 
 def baryon_density(field, data):
-    pos = np.column_stack([data["stars", "particle_position_%s" % ax]
-        for ax in 'xyz'])
-    pmass = data["stars", "particle_mass"]
-    mass  = data.deposit(pos, [pmass], method = "sum")
-    mass += data["gas", "CellMass"]
-    vol   = data["gas", "CellVolume"]
-    return mass / vol
+    rho = data["deposit", "stars_density"]
+    rho += data["gas", "Density"]
+    return rho
 
 ARTFieldInfo.add_field(("deposit", "baryon_density"),
          function = baryon_density,
@@ -298,14 +294,9 @@
          projection_conversion = 'cm')
 
 def total_density(field, data):
-    ptype = 'specie0'
     rho = data["deposit", "baryon_density"]
-    pos = np.column_stack([data[ptype, "particle_position_%s" % ax]
-                           for ax in 'xyz'])
-    pmas = data[ptype, "particle_mass"]
-    mass = data.deposit(pos, [pmas], method = "sum")
-    vol  = data["gas", "CellVolume"]
-    return rho + (mass / vol)
+    rho += data["deposit", "specie0_density"]
+    return rho
 
 ARTFieldInfo.add_field(("deposit", "total_density"),
          function = total_density,
@@ -315,16 +306,10 @@
          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
          projection_conversion = 'cm')
 
-
 def multimass_density(field, data):
-    ptype = 'darkmatter'
     rho = data["deposit", "baryon_density"]
-    pos = np.column_stack([data[ptype, "particle_position_%s" % ax]
-                           for ax in 'xyz'])
-    pmas = data[ptype, "particle_mass"]
-    mass = data.deposit(pos, [pmas], method = "sum")
-    vol   = data["gas","CellVolume"]
-    return rho + mass / vol
+    rho += data["deposit", "darkmatter_density"]
+    return rho
 
 ARTFieldInfo.add_field(("deposit", "multimass_density"),
          function = multimass_density,
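
Deriving the mixed densities from the per-type deposit fields removes
three hand-rolled deposition loops and leaves a single code path. The
resulting dependency chain, read off the diff above:

    ("deposit", "baryon_density")    = ("deposit", "stars_density")  + ("gas", "Density")
    ("deposit", "total_density")     = ("deposit", "baryon_density") + ("deposit", "specie0_density")
    ("deposit", "multimass_density") = ("deposit", "baryon_density") + ("deposit", "darkmatter_density")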


https://bitbucket.org/yt_analysis/yt-3.0/commits/064dc07441c8/
Changeset:   064dc07441c8
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-15 01:48:09
Summary:     Forgot to turn on caching
Affected #:  1 file

diff -r ba3e6dc5c90bbcf47762f4c31da4ac0d7787c385 -r 064dc07441c86efd0a32d616b013fdd77a7a88d1 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -44,7 +44,7 @@
     tb, ages = None, None
     cache = None
     masks = None
-    caching = False
+    caching = True
 
     def __init__(self):
         self.cache = {}
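
This flag gates the memoized read path introduced by this pull request
(see the merged diff in the final changeset below). Distilled to a
minimal sketch, independent of the yt classes, the pattern is:

    class MemoizedIO(object):
        caching = True

        def __init__(self):
            self.cache = {}

        def get_field(self, key, read):
            # reuse a previous read when caching is enabled
            if self.caching and key in self.cache:
                return self.cache[key]
            value = read()
            if self.caching:
                self.cache[key] = value
            return value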


https://bitbucket.org/yt_analysis/yt-3.0/commits/cea962acce98/
Changeset:   cea962acce98
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-20 00:49:46
Summary:     Merged in juxtaposicion/yt-3.0 (pull request #37)

NMSU-ART Enhancements
Affected #:  6 files

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -46,6 +46,7 @@
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
         return d
+
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -124,9 +124,9 @@
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
-        self.field_list = set(fluid_fields + particle_fields +
-                              particle_star_fields)
-        self.field_list = list(self.field_list)
+        self.field_list = [("gas", f) for f in fluid_fields]
+        self.field_list += set(particle_fields + particle_star_fields \
+                               + fluid_fields)
         # now generate all of the possible particle fields
         if "wspecies" in self.parameter_file.parameters.keys():
             wspecies = self.parameter_file.parameters['wspecies']
@@ -136,6 +136,10 @@
                 self.parameter_file.particle_types.append("specie%i" % specie)
         else:
             self.parameter_file.particle_types = []
+        for ptype in self.parameter_file.particle_types:
+            for pfield in self.particle_field_list:
+                pfn = (ptype, pfield)
+                self.field_list.append(pfn)
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -25,6 +25,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import numpy as np
+
+from yt.funcs import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -40,6 +42,10 @@
 from yt.utilities.physical_constants import mass_sun_cgs
 from yt.frontends.art.definitions import *
 
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
+
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
@@ -218,6 +224,7 @@
               particle_type=True,
               convert_function=lambda x: x.convert("particle_mass"))
 
+
 def _particle_age(field, data):
     tr = data["particle_creation_time"]
     return data.pf.current_time - tr
@@ -260,3 +267,55 @@
     return data["particle_mass"]/mass_sun_cgs
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
+
+# Particle Deposition Fields
+_ptypes = ["all", "darkmatter", "stars", "specie0"]
+
+for _ptype in _ptypes:
+    particle_vector_functions(_ptype, ["particle_position_%s" % ax for ax in 'xyz'],
+                                     ["particle_velocity_%s" % ax for ax in 'xyz'],
+                              ARTFieldInfo)
+    particle_deposition_functions(_ptype, "Coordinates", "particle_mass",
+                                   ARTFieldInfo)
+
+# Mixed Fluid-Particle Fields
+
+def baryon_density(field, data):
+    rho = data["deposit", "stars_density"]
+    rho += data["gas", "Density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "baryon_density"),
+         function = baryon_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Baryon Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+def total_density(field, data):
+    rho = data["deposit", "baryon_density"]
+    rho += data["deposit", "specie0_density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "total_density"),
+         function = total_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Total Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+def multimass_density(field, data):
+    rho = data["deposit", "baryon_density"]
+    rho += data["deposit", "darkmatter_density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "multimass_density"),
+         function = multimass_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Multimass Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -42,6 +42,14 @@
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
     tb, ages = None, None
+    cache = None
+    masks = None
+    caching = True
+
+    def __init__(self):
+        self.cache = {}
+        self.masks = {}
+        super(IOHandlerART, self).__init__()
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -68,85 +76,103 @@
                 cp += subset.cell_count
         return tr
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def _get_mask(self, selector, ftype):
+        key = (selector, ftype)
+        if self.caching and key in self.masks:
+            return self.masks[key]
+        pf = self.pf
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
+        pstr = 'particle_position_%s'
+        x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
+        mask = selector.select_points(x, y, z)
+        if self.caching:
+            self.masks[key] = mask
+            return self.masks[key]
+        else:
+            return mask
+
+    def _get_field(self,  field):
+        if self.caching and field in self.cache:
+            mylog.debug("Cached %s", str(field))
+            return self.cache[field]
+        mylog.debug("Reading %s", str(field))
         tr = {}
-        fields_read = []
-        for chunk in chunks:
-            level = chunk.objs[0].domain.domain_level
-            pf = chunk.objs[0].domain.pf
-            masks = {}
-            ws, ls = pf.parameters["wspecies"], pf.parameters["lspecies"]
-            sizes = np.diff(np.concatenate(([0], ls)))
-            ptmax = ws[-1]
-            npt = ls[-1]
-            nstars = ls[-1]-ls[-2]
-            file_particle = pf._file_particle_data
-            file_stars = pf._file_particle_stars
-            ftype_old = None
-            for field in fields:
-                if field in fields_read:
-                    continue
-                ftype, fname = field
-                pbool, idxa, idxb = _determine_field_size(pf, ftype, ls, ptmax)
-                npa = idxb-idxa
-                if not ftype_old == ftype:
-                    Nrow = pf.parameters["Nrow"]
-                    rp = lambda ax: read_particles(
-                        file_particle, Nrow, idxa=idxa,
-                        idxb=idxb, field=ax)
-                    x, y, z = (rp(ax) for ax in 'xyz')
-                    dd = pf.domain_dimensions[0]
-                    off = 1.0/dd
-                    x, y, z = (t.astype('f8')/dd - off for t in (x, y, z))
-                    mask = selector.select_points(x, y, z)
-                    size = mask.sum()
-                for i, ax in enumerate('xyz'):
-                    if fname.startswith("particle_position_%s" % ax):
-                        tr[field] = vars()[ax]
-                    if fname.startswith("particle_velocity_%s" % ax):
-                        tr[field] = rp('v'+ax)
-                if fname == "particle_mass":
-                    a = 0
-                    data = np.zeros(npa, dtype='f8')
-                    for ptb, size, m in zip(pbool, sizes, ws):
-                        if ptb:
-                            data[a:a+size] = m
-                            a += size
-                    tr[field] = data
-                elif fname == "particle_index":
-                    tr[field] = np.arange(idxa, idxb).astype('int64')
-                elif fname == "particle_type":
-                    a = 0
-                    data = np.zeros(npa, dtype='int')
-                    for i, (ptb, size) in enumerate(zip(pbool, sizes)):
-                        if ptb:
-                            data[a:a+size] = i
-                            a += size
-                    tr[field] = data
-                if pbool[-1] and fname in particle_star_fields:
-                    data = read_star_field(file_stars, field=fname)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    if nstars > 0:
-                        temp[-nstars:] = data
-                    tr[field] = temp
-                if fname == "particle_creation_time":
-                    self.tb, self.ages, data = interpolate_ages(
-                        tr[field][-nstars:],
-                        file_stars,
-                        self.tb,
-                        self.ages,
-                        pf.current_time)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    temp[-nstars:] = data
-                    tr[field] = temp
-                    del data
-                tr[field] = tr[field][mask].astype('f8')
-                ftype_old = ftype
-                fields_read.append(field)
+        ftype, fname = field
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, 
+                                                  self.ls, ptmax)
+        npa = idxb - idxa
+        sizes = np.diff(np.concatenate(([0], self.ls)))
+        rp = lambda ax: read_particles(
+            self.file_particle, self.Nrow, idxa=idxa,
+            idxb=idxb, fields=ax)
+        for i, ax in enumerate('xyz'):
+            if fname.startswith("particle_position_%s" % ax):
+                dd = self.pf.domain_dimensions[0]
+                off = 1.0/dd
+                tr[field] = rp([ax])[0]/dd - off
+            if fname.startswith("particle_velocity_%s" % ax):
+                tr[field], = rp(['v'+ax])
+        if fname == "particle_mass":
+            a = 0
+            data = np.zeros(npa, dtype='f8')
+            for ptb, size, m in zip(pbool, sizes, self.ws):
+                if ptb:
+                    data[a:a+size] = m
+                    a += size
+            tr[field] = data
+        elif fname == "particle_index":
+            tr[field] = np.arange(idxa, idxb)
+        elif fname == "particle_type":
+            a = 0
+            data = np.zeros(npa, dtype='int')
+            for i, (ptb, size) in enumerate(zip(pbool, sizes)):
+                if ptb:
+                    data[a: a + size] = i
+                    a += size
+            tr[field] = data
+        if pbool[-1] and fname in particle_star_fields:
+            data = read_star_field(self.file_stars, field=fname)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            nstars = self.ls[-1]-self.ls[-2]
+            if nstars > 0:
+                temp[-nstars:] = data
+            tr[field] = temp
+        if fname == "particle_creation_time":
+            self.tb, self.ages, data = interpolate_ages(
+                tr[field][-nstars:],
+                self.file_stars,
+                self.tb,
+                self.ages,
+                self.pf.current_time)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            temp[-nstars:] = data
+            tr[field] = temp
+            del data
         if field not in tr:
             tr[field] = np.array([])
-        return tr
+        if self.caching:
+            self.cache[field] = tr[field]
+            return self.cache[field]
+        else:
+            return tr[field]
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        chunk = chunks.next()
+        self.pf = chunk.objs[0].domain.pf
+        self.ws = self.pf.parameters["wspecies"]
+        self.ls = self.pf.parameters["lspecies"]
+        self.file_particle = self.pf._file_particle_data
+        self.file_stars = self.pf._file_particle_stars
+        self.Nrow = self.pf.parameters["Nrow"]
+        data = {f:np.array([]) for f in fields}
+        for f in fields:
+            ftype, fname = f
+            mask = self._get_mask(selector, ftype)
+            arr = self._get_field(f)[mask].astype('f8')
+            data[f] = np.concatenate((arr, data[f]))
+        return data
 
 def _determine_field_size(pf, field, lspecies, ptmax):
     pbool = np.zeros(len(lspecies), dtype="bool")
@@ -361,27 +387,29 @@
     return ranges
 
 
-def read_particles(file, Nrow, idxa, idxb, field):
+def read_particles(file, Nrow, idxa, idxb, fields):
     words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4  # for file_particle_data; not always true?
     np_per_page = Nrow**2  # defined in ART a_setup.h, # of particles/page
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
-    data = np.array([], 'f4')
     fh = open(file, 'r')
     skip, count = idxa, idxb - idxa
     kwargs = dict(words=words, real_size=real_size, 
                   np_per_page=np_per_page, num_pages=num_pages)
-    ranges = get_ranges(skip, count, field, **kwargs)
-    data = None
-    for seek, this_count in ranges:
-        fh.seek(seek)
-        temp = np.fromfile(fh, count=this_count, dtype='>f4')
-        if data is None:
-            data = temp
-        else:
-            data = np.concatenate((data, temp))
+    arrs = []
+    for field in fields:
+        ranges = get_ranges(skip, count, field, **kwargs)
+        data = None
+        for seek, this_count in ranges:
+            fh.seek(seek)
+            temp = np.fromfile(fh, count=this_count, dtype='>f4')
+            if data is None:
+                data = temp
+            else:
+                data = np.concatenate((data, temp))
+        arrs.append(data.astype('f8'))
     fh.close()
-    return data
+    return arrs
 
 
 def read_star_field(file, field=None):

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -336,3 +336,39 @@
         return self.ofield
 
 deposit_cic = CICDeposit
+
+cdef class WeightedMeanParticleField(ParticleDepositOperation):
+    # Deposit both mass * field and mass into two scalars
+    # then in finalize divide mass * field / mass
+    cdef np.float64_t *wf
+    cdef public object owf
+    cdef np.float64_t *w
+    cdef public object ow
+    def initialize(self):
+        self.owf = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray wfarr = self.owf
+        self.wf = <np.float64_t*> wfarr.data
+        
+        self.ow = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray warr = self.ow
+        self.w = <np.float64_t*> warr.data
+    
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, 
+                      np.float64_t ppos[3],
+                      np.float64_t *fields 
+                      ):
+        cdef int ii[3], i
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
+        self.w[ gind(ii[0], ii[1], ii[2], dim) + offset] += fields[1]
+        self.wf[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0] * fields[1]
+        
+    def finalize(self):
+        return self.owf / self.ow
+
+deposit_weighted_mean = WeightedMeanParticleField
+
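
A NumPy sketch of what WeightedMeanParticleField computes, in 1D for
brevity: accumulate weight and weight * value per cell in process(),
then divide in finalize(). Cells receiving no particles come out as
nan (0/0), which callers are expected to clean up:

    import numpy as np

    def weighted_mean_deposit(pos, vals, weights, left_edge, dx, ncells):
        w  = np.zeros(ncells)   # accumulated weight per cell
        wf = np.zeros(ncells)   # accumulated weight * value per cell
        idx = ((pos - left_edge) / dx).astype(int)
        for i, v, m in zip(idx, vals, weights):
            w[i]  += m
            wf[i] += v * m
        return wf / w           # weighted mean; nan where w == 0

    # mass-weighted mean velocity on a 4-cell grid
    pos  = np.array([0.1, 0.15, 0.6])
    vel  = np.array([1.0, 3.0, 2.0])
    mass = np.array([1.0, 1.0, 2.0])
    print(weighted_mean_deposit(pos, vel, mass, 0.0, 0.25, 4))
    # -> [  2.  nan   2.  nan]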

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
