[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org
Mon Jun 3 13:41:12 PDT 2013


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/2fac5fa49090/
Changeset:   2fac5fa49090
Branch:      stable
User:        MatthewTurk
Date:        2013-06-03 22:39:38
Summary:     Merging from mainline of development
Affected #:  58 files

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -32,6 +32,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -10,14 +10,15 @@
 # subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.)
 #
-# And, feel free to drop me a line: matthewturk at gmail.com
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
 #
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="stable" # This is the branch to which we will forcibly update.
 
-if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
     DEST_DIR=${YT_DEST}
 fi
@@ -97,6 +98,48 @@
 
 LOG_FILE="${DEST_DIR}/yt_install.log"
 
+function write_config
+{
+    CONFIG_FILE=${DEST_DIR}/.yt_config
+
+    echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+    echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
+    echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
+    echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
+    echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
+    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
+    echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
+    echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
+    echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
+    echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
+    echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
+    echo MPL_SUPP_LDFLAGS=${MPL_SUPP_LDFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CFLAGS=${MPL_SUPP_CFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CXXFLAGS=${MPL_SUPP_CXXFLAGS} >> ${CONFIG_FILE}
+    echo MAKE_PROCS=${MAKE_PROCS} >> ${CONFIG_FILE}
+    if [ ${HDF5_DIR} ]
+    then
+        echo HDF5_DIR=${HDF5_DIR} >> ${CONFIG_FILE}
+    fi
+    if [ ${NUMPY_ARGS} ]
+    then
+        echo "NUMPY_ARGS=\"${NUMPY_ARGS}\"" >> ${CONFIG_FILE}
+    fi
+}
+
+# Write config settings to file.
+CONFIG_FILE=${DEST_DIR}/.yt_config
+mkdir -p ${DEST_DIR}
+if [ -z ${REINST_YT} ] || [ ${REINST_YT} -ne 1 ]
+then
+    write_config
+elif [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -f ${CONFIG_FILE} ]
+then
+    USED_CONFIG=1
+    source ${CONFIG_FILE}
+fi
+
 function get_willwont
 {
     if [ $1 -eq 1 ]
@@ -375,6 +418,10 @@
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
 
+printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
+get_willwont ${INST_ROCKSTAR}
+echo "be installing Rockstar"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -396,6 +443,12 @@
 echo "hit Ctrl-C."
 echo
 host_specific
+if [ ${USED_CONFIG} ]
+then
+    echo "Settings were loaded from ${CONFIG_FILE}."
+    echo "Remove this file if you wish to return to the default settings."
+    echo
+fi
 echo "========================================================================"
 echo
 read -p "[hit enter] "
@@ -565,7 +618,7 @@
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
+                sed -i.bak -e 's/soname/install_name/' -e "s|CC=gcc|CC=${CC}|" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
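
The write_config/source pair above persists the installer's choices between
runs: a fresh install dumps the INST_* flags to ${DEST_DIR}/.yt_config, and a
reinstall sources that file back in. A hypothetical example of the generated
file (the values depend on what was chosen at install time):

    INST_HG=1
    INST_ZLIB=1
    INST_PNG=1
    INST_ROCKSTAR=0
    INST_SCIPY=0
    YT_DIR=
    MAKE_PROCS=-j4

Since the file is read back in with "source", every line in it needs to be a
valid shell assignment.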

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -117,3 +117,6 @@
 from .two_point_functions.api import \
     TwoPointFunctions, \
     FcnSet
+
+from .radmc3d_export.api import \
+    RadMC3DWriter

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -108,6 +108,7 @@
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
 
         self.light_ray_solution = []
+        self.halo_lists = {}
         self._data = {}
 
         # Get list of datasets for light ray solution.
@@ -192,6 +193,7 @@
                        get_los_velocity=False,
                        get_nearest_halo=False,
                        nearest_halo_fields=None,
+                       halo_list_file=None,
                        halo_profiler_parameters=None,
                        njobs=1, dynamic=False):
         """
@@ -229,6 +231,10 @@
             A list of fields to be calculated for the halos nearest to
             every lixel in the ray.
             Default: None.
+        halo_list_file : str
+            Filename containing a list of halo properties to be used 
+            for getting the nearest halos to absorbers.
+            Default: None.
         halo_profiler_parameters: dict
             A dictionary of parameters to be passed to the HaloProfiler
             to create the appropriate data used to get properties for
@@ -287,7 +293,7 @@
         >>> # Make the profiles.
         >>> halo_profiler_actions.append({'function': make_profiles,
         ...                           'args': None,
-        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...                           'kwargs': {'filename': 'VirializedHalos.h5'}})
         ...
         >>> halo_list = 'filtered'
         >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
@@ -305,6 +311,7 @@
         ...                   get_nearest_halo=True,
         ...                   nearest_halo_fields=['TotalMassMsun_100',
         ...                                        'RadiusMpc_100'],
+        ...                   halo_list_file='VirializedHalos.h5',
         ...                   halo_profiler_parameters=halo_profiler_parameters,
         ...                   get_los_velocity=True)
         
@@ -321,17 +328,18 @@
         # Initialize data structures.
         self._data = {}
         if fields is None: fields = []
-        all_fields = [field for field in fields]
+        data_fields = fields[:]
+        all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_nearest_halo:
             all_fields.extend(['x', 'y', 'z', 'nearest_halo'])
             all_fields.extend(['nearest_halo_%s' % field \
                                for field in nearest_halo_fields])
-            fields.extend(['x', 'y', 'z'])
+            data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['x-velocity', 'y-velocity',
                                'z-velocity', 'los_velocity'])
-            fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
+            data_fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
 
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
@@ -348,10 +356,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            if get_nearest_halo:
-                halo_list = self._get_halo_list(my_segment['filename'],
-                                                **halo_profiler_parameters)
-
             # Load dataset for segment.
             pf = load(my_segment['filename'])
 
@@ -373,7 +377,7 @@
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
-                for field in fields:
+                for field in data_fields:
                     sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
@@ -400,6 +404,9 @@
 
             # Calculate distance to nearest object on halo list for each lixel.
             if get_nearest_halo:
+                halo_list = self._get_halo_list(pf, fields=nearest_halo_fields,
+                                                filename=halo_list_file,
+                                                **halo_profiler_parameters)
                 sub_data.update(self._get_nearest_halo_properties(sub_data, halo_list,
                                 fields=nearest_halo_fields))
                 sub_data['nearest_halo'] *= pf.units['mpccm']
@@ -434,58 +441,92 @@
         self._data = all_data
         return all_data
 
-    def _get_halo_list(self, dataset, halo_profiler_kwargs=None,
+    def _get_halo_list(self, pf, fields=None, filename=None, 
+                       halo_profiler_kwargs=None,
                        halo_profiler_actions=None, halo_list='all'):
-        "Load a list of halos for the dataset."
+        "Load a list of halos for the pf."
+
+        if str(pf) in self.halo_lists:
+            return self.halo_lists[str(pf)]
+
+        if fields is None: fields = []
+
+        if filename is not None and \
+                os.path.exists(os.path.join(pf.fullpath, filename)):
+
+            my_filename = os.path.join(pf.fullpath, filename)
+            mylog.info("Loading halo list from %s." % my_filename)
+            my_list = {}
+            in_file = h5py.File(my_filename, 'r')
+            for field in fields + ['center']:
+                my_list[field] = in_file[field][:]
+            in_file.close()
+
+        else:
+            my_list = self._halo_profiler_list(pf, fields=fields,
+                                               halo_profiler_kwargs=halo_profiler_kwargs,
+                                               halo_profiler_actions=halo_profiler_actions,
+                                               halo_list=halo_list)
+
+        self.halo_lists[str(pf)] = my_list
+        return self.halo_lists[str(pf)]
+
+    def _halo_profiler_list(self, pf, fields=None, 
+                            halo_profiler_kwargs=None,
+                            halo_profiler_actions=None, halo_list='all'):
+        "Run the HaloProfiler to get the halo list."
 
         if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
         if halo_profiler_actions is None: halo_profiler_actions = []
 
-        hp = HaloProfiler(dataset, **halo_profiler_kwargs)
+        hp = HaloProfiler(pf, **halo_profiler_kwargs)
         for action in halo_profiler_actions:
             if not action.has_key('args'): action['args'] = ()
             if not action.has_key('kwargs'): action['kwargs'] = {}
             action['function'](hp, *action['args'], **action['kwargs'])
 
         if halo_list == 'all':
-            return_list = copy.deepcopy(hp.all_halos)
+            hp_list = copy.deepcopy(hp.all_halos)
         elif halo_list == 'filtered':
-            return_list = copy.deepcopy(hp.filtered_halos)
+            hp_list = copy.deepcopy(hp.filtered_halos)
         else:
             mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-            return_list = None
+            hp_list = None
 
         del hp
+
+        # Create position array from halo list.
+        return_list = dict([(field, []) for field in fields + ['center']])
+        for halo in hp_list:
+            for field in fields + ['center']:
+                return_list[field].append(halo[field])
+        for field in fields + ['center']:
+            return_list[field] = np.array(return_list[field])
         return return_list
-
+        
     def _get_nearest_halo_properties(self, data, halo_list, fields=None):
         """
         Calculate distance to nearest object in halo list for each lixel in data.
-        Return list of distances and masses of nearest objects.
+        Return list of distances and other properties of nearest objects.
         """
 
         if fields is None: fields = []
+        field_data = dict([(field, np.zeros_like(data['x'])) \
+                           for field in fields])
+        nearest_distance = np.zeros_like(data['x'])
 
-        # Create position array from halo list.
-        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
-                                                       halo_list))) \
-                                  for field in fields])
-
-        nearest_distance = np.zeros(data['x'].shape)
-        field_data = dict([(field, np.zeros(data['x'].shape)) \
-                           for field in fields])
-        for index in xrange(nearest_distance.size):
-            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
-                                                            data['y'][index],
-                                                            data['z'][index]]),
-                                                  halo_centers))
-            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
-                                                                  data['y'][index],
-                                                                  data['z'][index]]),
-                                                        halo_centers[nearest])
-            for field in fields:
-                field_data[field][index] = halo_field_values[field][nearest]
+        if halo_list['center'].size > 0:
+            for index in xrange(nearest_distance.size):
+                nearest = np.argmin(periodic_distance(np.array([data['x'][index],
+                                                                data['y'][index],
+                                                                data['z'][index]]),
+                                                      halo_list['center']))
+                nearest_distance[index] = periodic_distance(np.array([data['x'][index],
+                                                                      data['y'][index],
+                                                                      data['z'][index]]),
+                                                            halo_list['center'][nearest])
+                for field in fields:
+                    field_data[field][index] = halo_list[field][nearest]
 
         return_data = {'nearest_halo': nearest_distance}
         for field in fields:
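
For reference, the halo list file consumed by _get_halo_list above is a flat
HDF5 file: one dataset per requested field, plus a 'center' dataset of halo
positions, looked up relative to the dataset directory (pf.fullpath). A
minimal sketch that writes a compatible file with h5py (halo counts and
values are hypothetical; the field names match the docstring example):

    import h5py
    import numpy as np

    with h5py.File('VirializedHalos.h5', 'w') as out_file:
        # (N, 3) array of halo centers.
        out_file.create_dataset('center', data=np.random.random((50, 3)))
        # One flat dataset per entry in nearest_halo_fields.
        out_file.create_dataset('TotalMassMsun_100',
                                data=1e12 * np.random.random(50))
        out_file.create_dataset('RadiusMpc_100', data=np.random.random(50))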

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -143,10 +143,10 @@
             return self.CoM
         pm = self["ParticleMassMsun"]
         c = {}
-        c[0] = self["particle_position_x"]
-        c[1] = self["particle_position_y"]
-        c[2] = self["particle_position_z"]
-        c_vec = np.zeros(3)
+        # We shift into a box where the origin is the left edge
+        c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
+        c[1] = self["particle_position_y"] - self.pf.domain_left_edge[1]
+        c[2] = self["particle_position_z"] - self.pf.domain_left_edge[2]
         com = []
         for i in range(3):
             # A halo is likely periodic around a boundary if the distance 
@@ -159,13 +159,12 @@
                 com.append(c[i])
                 continue
             # Now we want to flip around only those close to the left boundary.
-            d_left = c[i] - self.pf.domain_left_edge[i]
-            sel = (d_left <= (self.pf.domain_width[i]/2))
+            sel = (c[i] <= (self.pf.domain_width[i]/2))
             c[i][sel] += self.pf.domain_width[i]
             com.append(c[i])
         com = np.array(com)
         c = (com * pm).sum(axis=1) / pm.sum()
-        return c%self.pf.domain_width
+        return c%self.pf.domain_width + self.pf.domain_left_edge
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -1062,7 +1061,7 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
+        *dm_only* is True (default), only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
         self._data_source = data_source
@@ -1097,7 +1096,7 @@
     def _get_dm_indices(self):
         if 'creation_time' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on creation time")
-            return (self._data_source["creation_time"] < 0)
+            return (self._data_source["creation_time"] <= 0)
         elif 'particle_type' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
@@ -1367,6 +1366,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
@@ -1457,7 +1457,7 @@
 class HOPHaloList(HaloList):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "HOP"
@@ -1656,7 +1656,7 @@
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "parallelHOP"
@@ -2008,13 +2008,11 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
-
+        ensure_dir_exists(filename)
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
 
+
     def write_particle_lists_txt(self, prefix):
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file.
@@ -2031,13 +2029,11 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
+
     @parallel_blocking_call
     def write_particle_lists(self, prefix):
         r"""Write out the particle data for halos to HDF5 files.
@@ -2058,10 +2054,7 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
@@ -2090,15 +2083,12 @@
         ellipsoid_data : bool.
             Whether to save the ellipsoidal information to the files.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.dump("MyHalos")
         """
-        # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
-
+        ensure_dir_exists(basename)
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
@@ -2131,7 +2121,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     resize : bool
         Turns load-balancing on or off. Default = True.
     kdtree : string
@@ -2460,7 +2450,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number
@@ -2565,7 +2555,7 @@
         applied.  Default = 0.2.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number
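
The reworked center_of_mass above handles halos that straddle a periodic
boundary by shifting into a frame whose origin is the domain's left edge,
moving particles in the left half of the box one box width to the right
before averaging, and mapping the result back into the domain. A minimal
one-dimensional sketch of the idea (the straddle test here is a simplified
stand-in for the distance check in the real method):

    import numpy as np

    def periodic_com_1d(x, mass, left_edge, width):
        # Shift so the left edge of the domain is the origin.
        c = x - left_edge
        # If the halo wraps around the boundary, flip the points in the
        # left half of the box one box width to the right.
        if c.max() - c.min() > 0.5 * width:
            c = np.where(c <= 0.5 * width, c + width, c)
        com = (c * mass).sum() / mass.sum()
        # Map the mass-weighted center back into the domain.
        return com % width + left_edge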

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -338,6 +338,8 @@
                     hires_only = (self.hires_dm_mass is not None),
                     **kwargs)
         # Make the directory to store the halo lists in.
+        if not self.outbase:
+            self.outbase = os.getcwd()
         if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
                 os.makedirs(self.outbase)

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -212,7 +212,7 @@
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
             if i == (self.num_sigma_bins - 3): break
 
-        self.dis = dis  / self.pf['CosmologyComovingBoxSize']**3.0 * self.hubble0**3.0
+        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
 
     def sigmaM(self):
         """

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -105,7 +105,8 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = np.log10(temp_profile[field])
+            temp_profile[field] = np.log10(np.clip(temp_profile[field], 1e-90, 
+                                                   max(temp_profile[field])))
 
     virial = dict((field, 0.0) for field in fields)
 

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import gc
 import numpy as np
 import os
 import h5py
@@ -583,7 +584,7 @@
 
             r_min = 2 * self.pf.h.get_smallest_dx() * self.pf['mpc']
             if (halo['r_max'] / r_min < PROFILE_RADIUS_THRESHOLD):
-                mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
+                mylog.debug("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
                 return None
 
             # get a sphere object to profile
@@ -630,6 +631,10 @@
                 g.clear_data()
             sphere.clear_data()
             del sphere
+            # Currently, this seems to be the only way to prevent large 
+            # halo profiling runs from running out of RAM.
+            # It would be good to track down the real cause at some point.
+            gc.collect()
 
         return profile
 

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -68,8 +68,10 @@
         self.prof = prof
 
     def plot_everything(self, dirname = None):
-        if dirname is None: dirname = "%s_profile_plots/" % (self.pf)
-        if not os.path.isdir(dirname): os.makedirs(dirname)
+        if not dirname:
+            dirname = "%s_profile_plots/" % (self.pf)
+        if not os.path.isdir(dirname):
+            os.makedirs(dirname)
         import matplotlib; matplotlib.use("Agg")
         import pylab
         for field in self.prof.keys():

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -0,0 +1,334 @@
+"""
+Code to export from yt to RadMC3D
+
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from yt.mods import *
+from yt.utilities.lib.write_array import \
+    write_3D_array, write_3D_vector_array
+
+class RadMC3DLayer:
+    '''
+
+    This class represents an AMR "layer" of the style described in
+    the radmc3d manual. Unlike yt grids, layers may not have more
+    than one parent, so level L grids will need to be split up
+    if they straddle two or more level L - 1 grids. 
+
+    '''
+    def __init__(self, level, parent, unique_id, LE, RE, dim):
+        self.level = level
+        self.parent = parent
+        self.LeftEdge = LE
+        self.RightEdge = RE
+        self.ActiveDimensions = dim
+        self.id = unique_id
+
+    def get_overlap_with(self, grid):
+        '''
+
+        Returns the overlapping region between two Layers,
+        or a layer and a grid. RE < LE in any direction
+        means no overlap.
+
+        '''
+        LE = np.maximum(self.LeftEdge,  grid.LeftEdge)
+        RE = np.minimum(self.RightEdge, grid.RightEdge)
+        return LE, RE
+
+    def overlaps(self, grid):
+        '''
+
+        Returns whether or not this layer overlaps a given grid
+        
+        '''
+        LE, RE = self.get_overlap_with(grid)
+        if np.any(RE <= LE):
+            return False
+        else:
+            return True
+
+class RadMC3DWriter:
+    '''
+
+    This class provides a mechanism for writing out data files in a format
+    readable by radmc3d. Currently, only the ASCII "Layer" style file format
+    is supported. For more information please see the radmc3d manual at:
+    http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d
+
+    Parameters
+    ----------
+
+    pf : `StaticOutput`
+        This is the parameter file object corresponding to the
+        simulation output to be written out.
+
+    max_level : int
+        An int corresponding to the maximum number of levels of refinement
+        to include in the output. Often, this does not need to be very large
+        as information on very high levels is frequently unobservable.
+        Default = 2. 
+
+    Examples
+    --------
+
+    This will create a field called "DustDensity" and write it out to the
+    file "dust_density.inp" in a form readable by radmc3d. It will also write
+    a "dust_temperature.inp" file with everything set to 10.0 K: 
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> dust_to_gas = 0.01
+    >>> def _DustDensity(field, data):
+    ...     return dust_to_gas*data["Density"]
+    >>> add_field("DustDensity", function=_DustDensity)
+
+    >>> def _DustTemperature(field, data):
+    ...     return 10.0*data["Ones"]
+    >>> add_field("DustTemperature", function=_DustTemperature)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_dust_file("DustDensity", "dust_density.inp")
+    >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
+
+    This will create a field called "NumberDensityCO" and write it out to
+    the file "numberdens_co.inp". It will also write out information about
+    the gas velocity to "gas_velocity.inp" so that this broadening may be
+    included in the radiative transfer calculation by radmc3d:
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> x_co = 1.0e-4
+    >>> mu_h = 2.34e-24
+    >>> def _NumberDensityCO(field, data):
+    ...     return (x_co/mu_h)*data["Density"]
+    >>> add_field("NumberDensityCO", function=_NumberDensityCO)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
+    >>> velocity_fields = ["x-velocity", "y-velocity", "z-velocity"]
+    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") 
+
+    '''
+
+    def __init__(self, pf, max_level=2):
+        self.max_level = max_level
+        self.cell_count = 0 
+        self.layers = []
+        self.domain_dimensions = pf.domain_dimensions
+        self.domain_left_edge  = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.grid_filename = "amr_grid.inp"
+        self.pf = pf
+
+        base_layer = RadMC3DLayer(0, None, 0, \
+                                  self.domain_left_edge, \
+                                  self.domain_right_edge, \
+                                  self.domain_dimensions)
+
+        self.layers.append(base_layer)
+        self.cell_count += np.product(pf.domain_dimensions)
+
+        for grid in pf.h.grids:
+            if grid.Level <= self.max_level:
+                self._add_grid_to_layers(grid)
+
+    def _get_parents(self, grid):
+        parents = []  
+        for potential_parent in self.layers:
+            if potential_parent.level == grid.Level - 1:
+                if potential_parent.overlaps(grid):
+                    parents.append(potential_parent)
+        return parents
+
+    def _add_grid_to_layers(self, grid):
+        parents = self._get_parents(grid)
+        for parent in parents:
+            LE, RE = parent.get_overlap_with(grid)
+            N = (RE - LE) / grid.dds
+            N = np.array([int(n + 0.5) for n in N])
+            new_layer = RadMC3DLayer(grid.Level, parent.id, \
+                                     len(self.layers), \
+                                     LE, RE, N)
+            self.layers.append(new_layer)
+            self.cell_count += np.product(N)
+            
+    def write_amr_grid(self):
+        '''
+        This routine writes the "amr_grid.inp" file that describes the mesh
+        radmc3d will use.
+
+        '''
+        dims = self.domain_dimensions
+        LE   = self.domain_left_edge
+        RE   = self.domain_right_edge
+
+        # calculate cell wall positions
+        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+
+        # write file header
+        grid_file = open(self.grid_filename, 'w')
+        grid_file.write('1 \n') # iformat is always 1
+        if self.max_level == 0:
+            grid_file.write('0 \n')
+        else:
+            grid_file.write('10 \n') # only layer-style AMR files are supported
+        grid_file.write('1 \n') # only cartesian coordinates are supported
+        grid_file.write('0 \n') 
+        grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
+        grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
+        if self.max_level != 0:
+            s = str(self.max_level) + '    ' + str(len(self.layers)-1) + '\n'
+            grid_file.write(s)
+
+        # write base grid cell wall positions
+        for x in xs:
+            grid_file.write(x + '    ')
+        grid_file.write('\n')
+
+        for y in ys:
+            grid_file.write(y + '    ')
+        grid_file.write('\n')
+
+        for z in zs:
+            grid_file.write(z + '    ')
+        grid_file.write('\n')
+
+        # write information about fine layers, skipping the base layer:
+        for layer in self.layers[1:]:
+            p = layer.parent
+            dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions)
+            if p == 0:
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            else:
+                LE = np.zeros(3)
+                for potential_parent in self.layers:
+                    if potential_parent.id == p:
+                        LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            ix  = int(ind[0]+0.5)
+            iy  = int(ind[1]+0.5)
+            iz  = int(ind[2]+0.5)
+            nx, ny, nz = layer.ActiveDimensions / 2
+            s = '{}    {}    {}    {}    {}    {}    {} \n'
+            s = s.format(p, ix, iy, iz, nx, ny, nz)
+            grid_file.write(s)
+
+        grid_file.close()
+
+    def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
+        cg = self.pf.h.covering_grid(level, LE, dim, num_ghost_zones=1)
+        if isinstance(field, list):
+            data_x = cg[field[0]]
+            data_y = cg[field[1]]
+            data_z = cg[field[2]]
+            write_3D_vector_array(data_x, data_y, data_z, fhandle)
+        else:
+            data = cg[field]
+            write_3D_array(data, fhandle)
+
+    def write_dust_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        thermal dust emission. In particular, if you have a field called
+        "DustDensity", you can write out a dust_density.inp file.
+
+        Parameters
+        ----------
+
+        field : string
+            The name of the field to be written out
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operations are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+        fhandle.write('1 \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+            
+        fhandle.close()
+
+    def write_line_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        line emission.
+
+        Parameters
+        ----------
+
+        field : string or list of 3 strings
+            If a string, the name of the field to be written out. If a list,
+            three fields that will be written to the file as a vector quantity.
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+
+        fhandle.close()
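
The layer construction above relies on a standard axis-aligned bounding-box
test: the intersection of two boxes is bounded by the larger left edge and
the smaller right edge in each dimension, and an inverted pair of edges
means no overlap. A standalone sketch of the same test:

    import numpy as np

    def box_overlap(LE1, RE1, LE2, RE2):
        # Intersect two axis-aligned boxes.
        LE = np.maximum(LE1, LE2)
        RE = np.minimum(RE1, RE2)
        if np.any(RE <= LE):
            return None  # RE <= LE in some direction: no overlap
        return LE, RE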

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/radmc3d_export/api.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/api.py
@@ -0,0 +1,30 @@
+"""
+API for RadMC3D Export code
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .RadMC3DInterface import \
+    RadMC3DWriter

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -20,4 +20,5 @@
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
+    config.add_subpackage("radmc3d_export")
     return config

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold007',
+    gold_standard_filename = 'gold008',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4080,7 +4080,7 @@
             if region in ["OR", "AND", "NOT", "(", ")"]:
                 s += region
             else:
-                s += region.__repr__(clean = True)
+                s += region.__repr__()
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -151,8 +151,12 @@
     particle masses in the object.
     """
     baryon_mass = data["CellMassMsun"].sum()
-    particle_mass = data["ParticleMassMsun"].sum()
-    return [baryon_mass + particle_mass]
+    try:
+        particle_mass = data["ParticleMassMsun"].sum()
+        total_mass = baryon_mass + particle_mass
+    except KeyError:
+        total_mass = baryon_mass
+    return [total_mass]
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -271,12 +271,14 @@
                 else: self[item] = vv.ravel()
                 return self[item]
         self.requested.append(item)
-        return defaultdict.__missing__(self, item)
+        if item not in self:
+            self[item] = self._read_data(item)
+        return self[item]
 
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)
-        if FI.has_key(field_name) and FI[field_name].particle_type:
+        if field_name in FI and FI[field_name].particle_type:
             self.requested.append(field_name)
             return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -165,7 +165,7 @@
     def get_smallest_appropriate_unit(self, v):
         max_nu = 1e30
         good_u = None
-        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'cm']:
+        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
             vv = v*self[unit]
             if vv < max_nu and vv > 1.0:
                 good_u = unit

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -44,7 +44,8 @@
     NeedsOriginalGrid, \
     NeedsDataField, \
     NeedsProperty, \
-    NeedsParameter
+    NeedsParameter, \
+    NullFunc
 
 from yt.utilities.physical_constants import \
      mh, \
@@ -440,7 +441,7 @@
           convert_function=_convertCellMassCode)
 
 def _TotalMass(field,data):
-    return (data["Density"]+data["Dark_Matter_Density"]) * data["CellVolume"]
+    return (data["Density"]+data["particle_density"]) * data["CellVolume"]
 add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,
@@ -453,7 +454,7 @@
           convert_function=_convertCellMassMsun)
 
 def _Matter_Density(field,data):
-    return (data['Density'] + data['Dark_Matter_Density'])
+    return (data['Density'] + data['particle_density'])
 add_field("Matter_Density",function=_Matter_Density,units=r"\rm{g}/\rm{cm^3}")
 
 def _ComovingDensity(field, data):
@@ -982,22 +983,29 @@
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
 
-def _convertDensity(data):
-    return data.convert("Density")
+# We add these fields so that the field detector can use them
+for field in ["particle_position_%s" % ax for ax in "xyz"] + \
+             ["ParticleMass"]:
+    # This marker should let everyone know not to use the fields, but NullFunc
+    # should do that, too.
+    add_field(field, function=NullFunc, particle_type = True,
+        units=r"UNDEFINED")
+
 def _pdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank
     CICDeposit_3(data["particle_position_x"].astype(np.float64),
                  data["particle_position_y"].astype(np.float64),
                  data["particle_position_z"].astype(np.float64),
-                 data["particle_mass"].astype(np.float32),
+                 data["ParticleMass"],
                  data["particle_position_x"].size,
                  blank, np.array(data.LeftEdge).astype(np.float64),
                  np.array(data.ActiveDimensions).astype(np.int32),
                  np.float64(data['dx']))
+    np.divide(blank, data["CellVolume"], blank)
     return blank
 add_field("particle_density", function=_pdensity,
-          validators=[ValidateGridType()], convert_function=_convertDensity,
+          validators=[ValidateGridType()],
           display_name=r"\mathrm{Particle}\/\mathrm{Density}")
 
 def _MagneticEnergy(field,data):
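
The revised _pdensity above deposits particle mass onto the grid in double
precision and then divides by the cell volume in place, so the field comes
out as a true mass density rather than a raw deposited mass. A minimal
sketch of that normalization, using simple nearest-grid-point deposition in
place of the compiled CICDeposit_3 routine:

    import numpy as np

    def ngp_density(positions, masses, left_edge, dims, dx):
        # Deposit each particle's mass into its host cell, then divide
        # by the cell volume to turn deposited mass into density.
        field = np.zeros(dims, dtype='float64')
        indices = ((positions - left_edge) / dx).astype('int32')
        for (i, j, k), m in zip(indices, masses):
            field[i, j, k] += m
        field /= dx**3
        return field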

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -280,12 +280,12 @@
         projection_conversion="1")
 
 def _spdensity(field, data):
-    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float64')
     if data.star_mass.shape[0] ==0 : return grid_mass 
     amr_utils.CICDeposit_3(data.star_position_x,
                            data.star_position_y,
                            data.star_position_z,
-                           data.star_mass.astype('float32'),
+                           data.star_mass,
                            data.star_mass.shape[0],
                            grid_mass, 
                            np.array(data.LeftEdge).astype(np.float64),

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -4,11 +4,15 @@
 Author: Samuel W. Skillman <samskillman at gmail.com>
 Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: John ZuHone <jzuhone at gmail.com>
+Affiliation: NASA/Goddard Space Flight Center
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  Copyright (C) 2008-2013 Samuel W. Skillman, Matthew Turk, J. S. Oishi,
+  John ZuHone.
   All Rights Reserved.
 
   This file is part of yt.
@@ -54,13 +58,7 @@
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
         df = hierarchy.parameter_file.filename[4:-4]
-        if 'id0' not in hierarchy.parameter_file.filename:
-            gname = hierarchy.parameter_file.filename
-        else:
-            if id == 0:
-                gname = 'id0/%s.vtk' % df
-            else:
-                gname = 'id%i/%s-id%i%s.vtk' % (id, df[:-5], id, df[-5:] )
+        gname = hierarchy.grid_filenames[id]
         AMRGridPatch.__init__(self, id, filename = gname,
                               hierarchy = hierarchy)
         self.filename = gname
@@ -85,6 +83,9 @@
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
+    def __repr__(self):
+        return "AthenaGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
 def parse_line(line, grid):
     # grid is a dictionary
     splitup = line.strip().split()
@@ -113,8 +114,6 @@
         grid['read_field'] = field
         grid['read_type'] = 'vector'
 
-
-
 class AthenaHierarchy(AMRHierarchy):
 
     grid = AthenaGrid
@@ -220,6 +219,10 @@
         dname = self.hierarchy_filename
         gridlistread = glob.glob('id*/%s-id*%s' % (dname[4:-9],dname[-9:] ))
         gridlistread.insert(0,self.hierarchy_filename)
+        if 'id0' in dname :
+            gridlistread += glob.glob('id*/lev*/%s*-lev*%s' % (dname[4:-9],dname[-9:]))
+        else :
+            gridlistread += glob.glob('lev*/%s*-lev*%s' % (dname[:-9],dname[-9:]))
         self.num_grids = len(gridlistread)
         dxs=[]
         self.grids = np.empty(self.num_grids, dtype='object')
@@ -228,13 +231,9 @@
         gdds = np.empty((self.num_grids,3), dtype='float64')
         gdims = np.ones_like(glis)
         j = 0
+        self.grid_filenames = gridlistread
         while j < (self.num_grids):
             f = open(gridlistread[j],'rb')
-            f.close()
-            if j == 0:
-                f = open(dname,'rb')
-            if j != 0:
-                f = open('id%i/%s-id%i%s' % (j, dname[4:-9],j, dname[-9:]),'rb')
             gridread = {}
             gridread['read_field'] = None
             gridread['read_type'] = None
@@ -251,6 +250,7 @@
                 if len(line) == 0: break
                 line = f.readline()
             f.close()
+            levels[j] = gridread['level']
             glis[j,0] = gridread['left_edge'][0]
             glis[j,1] = gridread['left_edge'][1]
             glis[j,2] = gridread['left_edge'][2]
@@ -356,17 +356,44 @@
         self.time_units = {}
         if len(self.parameters) == 0:
             self._parse_parameter_file()
-        self._setup_nounits_units()
-        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.conversion_factors = defaultdict(lambda: 1.0)    
+        if self.specified_parameters.has_key("LengthUnits") :
+            self._setup_getunits_units()
+        else :
+            self._setup_nounits_units()
+        self.parameters["Time"] = self.conversion_factors["Time"]
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = self.conversion_factors["Time"] / sec_conversion[unit]
+                        
+    def _setup_getunits_units(self) :
+        box_proper = 3.24077e-25 * self.specified_parameters["LengthUnits"]
+        self.units['aye']  = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] * box_proper
+        if self.specified_parameters.has_key("TimeUnits"):
+            self.conversion_factors["Time"] = self.specified_parameters["TimeUnits"]
+        else :
+            self.conversion_factors["Time"] = 1.0
+        if self.specified_parameters.has_key("DensityUnits"):
+            self.conversion_factors["Density"] = self.specified_parameters["DensityUnits"]
+        else :
+            self.conversion_factors["Density"] = 1.0
+        self.conversion_factors["Mass"] = self.conversion_factors["Density"]*self.units["cm"]**3
+        for a in 'xyz':
+            self.conversion_factors["%s-velocity" % (a)] = self.units["cm"]/self.conversion_factors["Time"]
+                                            
     def _setup_nounits_units(self):
         self.conversion_factors["Time"] = 1.0
+        self.conversion_factors["Density"] = 1.0
+        self.conversion_factors["Mass"] = 1.0
+        for a in 'xyz':
+            self.conversion_factors["%s-velocity" % (a)] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
+        
     def _parse_parameter_file(self):
         self._handle = open(self.parameter_filename, "rb")
         # Read the start of a grid to get simulation parameters.
@@ -414,12 +441,20 @@
 
         dname = self.parameter_filename
         gridlistread = glob.glob('id*/%s-id*%s' % (dname[4:-9],dname[-9:] ))
+        if 'id0' in dname :
+            gridlistread += glob.glob('id*/lev*/%s*-lev*%s' % (dname[4:-9],dname[-9:]))
+        else :
+            gridlistread += glob.glob('lev*/%s*-lev*%s' % (dname[:-9],dname[-9:]))
         self.nvtk = len(gridlistread)+1 
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
         self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        if self.specified_parameters.has_key("gamma") :
+            self.parameters["Gamma"] = self.specified_parameters["gamma"]
+        else :
+            self.parameters["Gamma"] = 5./3. 
         self._handle.close()
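
In this scheme the unit and gamma information arrives through
specified_parameters, so a unitless Athena VTK dump can be given physical
units at load time. A hedged sketch of what that looks like (the parameter
values here are hypothetical):

    from yt.mods import load

    pf = load('kh.0010.vtk',
              parameters={'LengthUnits': 3.0857e24,  # cm per code length
                          'TimeUnits': 3.1557e13,    # s per code time
                          'DensityUnits': 1.67e-24,  # g/cm^3 per code density
                          'gamma': 5.0 / 3.0})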
 
 

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -5,6 +5,8 @@
 Affiliation: University of Colorado at Boulder
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: John A. ZuHone <jzuhone at gmail.com>
+Affiliation: NASA/Goddard Space Flight Center
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
@@ -41,14 +43,6 @@
     kboltz,mh
 import yt.data_objects.universal_fields
 
-log_translation_dict = {}
-
-translation_dict = {"Density": "density",
-                    "Pressure": "pressure",
-                    "x-velocity": "velocity_x",
-                    "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z"}
-
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
 
@@ -56,60 +50,135 @@
 add_athena_field = KnownAthenaFields.add_field
 
 add_athena_field("density", function=NullFunc, take_log=False,
-          units=r"",
-          projected_units =r"")
+                 units=r"", projected_units =r"")
 
 add_athena_field("pressure", function=NullFunc, take_log=False,
-          units=r"")
+                 units=r"")
+
+add_athena_field("total_energy", function=NullFunc, take_log=False,
+                 units=r"")
 
 add_athena_field("velocity_x", function=NullFunc, take_log=False,
-          units=r"")
+                 units=r"")
 
 add_athena_field("velocity_y", function=NullFunc, take_log=False,
-          units=r"")
+                 units=r"")
 
 add_athena_field("velocity_z", function=NullFunc, take_log=False,
-          units=r"")
+                 units=r"")
+
+add_athena_field("momentum_x", function=NullFunc, take_log=False,
+                 units=r"")
+
+add_athena_field("momentum_y", function=NullFunc, take_log=False,
+                 units=r"")
+
+add_athena_field("momentum_z", function=NullFunc, take_log=False,
+                 units=r"")
 
 add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
-          units=r"", display_name=r"$\rm{cell\/centered\/B_x}$")
+                 units=r"", display_name=r"$\rm{cell\/centered\/B_x}$")
 
 add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
-          units=r"", display_name=r"$\rm{cell\/centered\/B_y}$")
+                 units=r"", display_name=r"$\rm{cell\/centered\/B_y}$")
 
 add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
-          units=r"", display_name=r"$\rm{cell\/centered\/B_z}$")
+                 units=r"", display_name=r"$\rm{cell\/centered\/B_z}$")
 
-for f,v in log_translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=True)
+# In Athena, conservative or primitive variables may be written out.
+# By default, yt concerns itself with primitive variables. The following
+# field definitions allow for conversions to primitive variables in the
+# case that the file contains the conservative ones.
 
-for f,v in translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=False)
+def _convertDensity(data) :
+    return data.convert("Density")
+def _density(field, data) :
+    return data["density"]
+add_field("Density", function=_density, take_log=False,
+          units=r"\rm{g}/\rm{cm}^3", projected_units=r"\rm{g}/\rm{cm}^2",
+          convert_function=_convertDensity)
 
-def _Temperature(fields, data):
-    if data.has_field_parameter("mu") :
+def _convertVelocity(data):
+    return data.convert("x-velocity")
+def _xvelocity(field, data):
+    if "velocity_x" in data.pf.field_info:
+        return data["velocity_x"]
+    else:
+        return data["momentum_x"]/data["density"]           
+add_field("x-velocity", function=_xvelocity, take_log=False,
+          units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
+def _yvelocity(field, data):
+    if "velocity_y" in data.pf.field_info:
+        return data["velocity_y"]
+    else:
+        return data["momentum_y"]/data["density"]
+add_field("y-velocity", function=_yvelocity, take_log=False,
+          units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
+def _zvelocity(field, data):
+    if "velocity_z" in data.pf.field_info:
+        return data["velocity_z"]
+    else:
+        return data["momentum_z"]/data["density"]
+add_field("z-velocity", function=_zvelocity, take_log=False,
+          units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
+
+def _convertEnergy(data) :
+    return data.convert("x-velocity")**2
+def _gasenergy(field, data) :
+    if "pressure" in data.pf.field_info:
+        return data["pressure"]/(data.pf["Gamma"]-1.0)/data["density"]
+    else:
+        return (data["total_energy"] - 
+                0.5*(data["cell_centered_B_x"]**2 +
+                     data["cell_centered_B_y"]**2 +
+                     data["cell_centered_B_z"]**2) - 
+                0.5*(data["momentum_x"]**2 +
+                     data["momentum_y"]**2 +
+                     data["momentum_z"]**2)/data["density"])/data["density"]
+add_field("Gas_Energy", function=_gasenergy, take_log=False,
+          units=r"\rm{erg}/\rm{g}")
+
+def _convertPressure(data) :
+    return data.convert("Density")*data.convert("x-velocity")**2
+def _pressure(field, data) :
+    if "pressure" in data.pf.field_info:
+        return data["pressure"]
+    else:
+        return (data["total_energy"] -
+                0.5*(data["cell_centered_B_x"]**2 +
+                     data["cell_centered_B_y"]**2 +
+                     data["cell_centered_B_z"]**2) -
+                0.5*(data["momentum_x"]**2 +
+                     data["momentum_y"]**2 +
+                     data["momentum_z"]**2)/data["density"])*(data.pf["Gamma"]-1.0)
+add_field("Pressure", function=_pressure, take_log=False, convert_function=_convertPressure,
+          units=r"\rm{erg}/\rm{cm}^3", projected_units=r"\rm{erg}/\rm{cm}^2")
+
+def _temperature(field, data):
+    if data.has_field_parameter("mu"):
         mu = data.get_field_parameter("mu")
     else:
         mu = 0.6
     return mu*mh*data["Pressure"]/data["Density"]/kboltz
-add_field("Temperature", function=_Temperature, take_log=False,
+add_field("Temperature", function=_temperature, take_log=False,
           units=r"\rm{K}")
 
-def _Bx(fields, data):
-    factor = np.sqrt(4.*np.pi)
-    return data['cell_centered_B_x']*factor
+def _convertBfield(data):
+    return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
+def _Bx(field, data):
+    return data['cell_centered_B_x']
 add_field("Bx", function=_Bx, take_log=False,
-          units=r"\rm{Gauss}", display_name=r"B_x")
+          units=r"\rm{Gauss}", display_name=r"B_x",
+          convert_function=_convertBfield)
+def _By(field, data):
+    return data['cell_centered_B_y']
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y",
+          convert_function=_convertBfield)
+def _Bz(field, data):
+    return data['cell_centered_B_z']
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z",
+          convert_function=_convertBfield)
 
-def _By(fields, data):
-    factor = np.sqrt(4.*np.pi)
-    return data['cell_centered_B_y']*factor
-add_field("By", function=_By, take_log=False,
-          units=r"\rm{Gauss}", display_name=r"B_y")
 
-def _Bz(fields, data):
-    factor = np.sqrt(4.*np.pi)
-    return data['cell_centered_B_z']*factor
-add_field("Bz", function=_Bz, take_log=False,
-          units=r"\rm{Gauss}", display_name=r"B_z")
-
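The new Pressure and Gas_Energy definitions above implement the standard MHD conservative-to-primitive conversion: subtract the magnetic and kinetic energy densities from the total energy density, then scale by (Gamma - 1). A self-contained numerical sketch of the same arithmetic, in plain NumPy rather than the yt field API (the constants are approximate CGS values standing in for yt's mh and kboltz):

    import numpy as np

    gamma  = 5./3.
    kboltz = 1.3806e-16                     # Boltzmann constant, erg/K
    mh     = 1.6737e-24                     # hydrogen mass, g

    rho   = np.array([1.0, 2.0])            # density
    mom   = np.array([[0.5, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])     # momentum density
    bcc   = np.array([[0.1, 0.0, 0.0],
                      [0.0, 0.2, 0.0]])     # cell-centered B field
    e_tot = np.array([2.0, 3.0])            # total energy density

    kinetic  = 0.5 * (mom ** 2).sum(axis=-1) / rho
    magnetic = 0.5 * (bcc ** 2).sum(axis=-1)
    pressure = (gamma - 1.0) * (e_tot - magnetic - kinetic)

    # Temperature as defined above, with the default mean molecular weight
    mu = 0.6
    temperature = mu * mh * pressure / (rho * kboltz)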

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -163,10 +163,12 @@
                         "angmomen_y",
                         "angmomen_z",
                         "mlast",
+                        "r",
                         "mdeut",
                         "n",
                         "mdot",
                         "burnstate",
+                        "luminosity",
                         "id"]
 
 for pf in _particle_field_list:
@@ -174,3 +176,18 @@
     add_field("particle_%s" % pf, function=pfunc,
               validators = [ValidateSpatial(0)],
               particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)
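For reference, the 1.989e33 divisor is the solar mass in grams, so ParticleMassMsun simply re-expresses the particle mass field (evidently stored in grams) in solar masses:

    MSUN_G = 1.989e33              # solar mass in grams, as used above
    print 3.978e33 / MSUN_G        # -> ~2.0 solar masses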

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -77,6 +77,15 @@
         parses the Orion Star Particle text files
              
         """
+
+        fn = grid.pf.fullplotdir[:-4] + "sink"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+
+        # The basic fields that all sink particles have
         index = {'particle_mass': 0,
                  'particle_position_x': 1,
                  'particle_position_y': 2,
@@ -87,15 +96,38 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
+                 'particle_id': -1}
 
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Could not determine the particle output file format.')
+            mylog.warning('The resulting particle fields could be nonsense!')
+            
         def read(line, field):
-            return float(line.split(' ')[index[field]])
+            return float(line.strip().split(' ')[index[field]])
 
         fn = grid.pf.fullplotdir[:-4] + "sink"
         with open(fn, 'r') as f:
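The dispatch above keys off the number of whitespace-separated columns in the file's second line (lines[1]): 11 columns are plain sink particles, 17 add the stellar-model parameters, and 18 add luminosity as well. A standalone sketch of the same mapping, with the positional fields abbreviated (the real reader only warns rather than raising):

    def sink_field_index(ncol):
        # Map the column count of a sink-file line to {field: column}.
        index = {'particle_mass': 0, 'particle_id': -1}   # abbreviated
        if ncol == 11:                    # vanilla sinks
            return index
        if ncol in (17, 18):              # old-style stars
            index.update(particle_mlast=10, particle_r=11,
                         particle_mdeut=12, particle_n=13,
                         particle_mdot=14, particle_burnstate=15)
            if ncol == 18:                # newer style adds luminosity
                index['particle_luminosity'] = 16
            return index
        raise ValueError("unrecognized particle file: %d columns" % ncol)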

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -105,6 +105,9 @@
         """
         Intelligently set the filename.
         """
+        if filename is None:
+            self.filename = filename
+            return
         if self.hierarchy._strip_path:
             self.filename = os.path.join(self.hierarchy.directory,
                                          os.path.basename(filename))
@@ -302,7 +305,7 @@
             LE.append(_next_token_line("GridLeftEdge", f))
             RE.append(_next_token_line("GridRightEdge", f))
             nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
-            fn.append(["-1"])
+            fn.append([None])
             if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
             npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
             if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
@@ -373,6 +376,7 @@
         giter = izip(grids, levels, procs, parents)
         bn = self._bn % (self.pf)
         pmap = [(bn % P,) for P in xrange(procs.max()+1)]
+        pmap.append((None, )) # Now, P==-1 will give None
         for grid,L,P,Pid in giter:
             grid.Level = L
             grid._parent_id = Pid
@@ -405,7 +409,10 @@
                 parents.append(g.Parent.id)
             else:
                 parents.append(-1)
-            procs.append(int(self.filenames[i][0][-4:]))
+            if self.filenames[i][0] is None:
+                procs.append(-1)
+            else:
+                procs.append(int(self.filenames[i][0][-4:]))
             levels.append(g.Level)
 
         parents = np.array(parents, dtype='int64')
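The pmap.append((None,)) line above is the crux of the DM-only fix: grids without a baryon file are recorded with processor number -1, and since Python indexes sequences from the end with negative numbers, pmap[-1] transparently yields the (None,) sentinel instead of a real file name. A minimal illustration of the idiom (names are ad hoc, not yt's):

    pmap = [("data.cpu%04d" % p,) for p in range(3)]
    pmap.append((None,))            # sentinel: P == -1 now maps to None

    for proc in (0, 2, -1):
        print proc, pmap[proc][0]   # -1 prints None; the reader skips it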

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -352,14 +352,14 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter].astype(np.float32),
+                           data["particle_mass"][filter],
                            np.int64(np.where(filter)[0].size),
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -369,7 +369,7 @@
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
@@ -381,7 +381,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter].astype(np.float32),
+                           data["particle_mass"][filter],
                            num,
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -396,24 +396,24 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = np.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
     amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
                            data["particle_position_y"].astype(np.float64),
                            data["particle_position_z"].astype(np.float64),
-                           particle_field_data.astype(np.float32),
+                           particle_field_data,
                            data["particle_position_x"].size,
                            top, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     del particle_field_data
 
-    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    bottom = np.zeros(data.ActiveDimensions, dtype='float64')
     amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
                            data["particle_position_y"].astype(np.float64),
                            data["particle_position_z"].astype(np.float64),
-                           data["particle_mass"].astype(np.float32),
+                           data["particle_mass"],
                            data["particle_position_x"].size,
                            bottom, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -435,7 +435,7 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = np.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
@@ -443,18 +443,18 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                           data["particle_position_y"][filter].astype(np.float64),
                           data["particle_position_z"][filter].astype(np.float64),
-                          particle_field_data.astype(np.float32),
+                          particle_field_data,
                           np.int64(np.where(filter)[0].size),
                           top, np.array(data.LeftEdge).astype(np.float64),
                           np.array(data.ActiveDimensions).astype(np.int32), 
                           np.float64(data['dx']))
     del particle_field_data
 
-    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    bottom = np.zeros(data.ActiveDimensions, dtype='float64')
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                           data["particle_position_y"][filter].astype(np.float64),
                           data["particle_position_z"][filter].astype(np.float64),
-                          data["particle_mass"][filter].astype(np.float32),
+                          data["particle_mass"][filter],
                           np.int64(np.where(filter)[0].size),
                           bottom, np.array(data.LeftEdge).astype(np.float64),
                           np.array(data.ActiveDimensions).astype(np.int32), 
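The enzo/fields.py hunks above promote the deposit buffers and particle masses from float32 to float64, so masses are no longer truncated to single precision on their way into the cloud-in-cell (CIC) deposit. In CIC deposition each particle's mass is split between the two nearest cells per dimension, weighted linearly by distance; here is a one-dimensional analogue of what CICDeposit_3 does (a sketch, not yt's Cython implementation):

    import numpy as np

    def cic_deposit_1d(pos, mass, nbins, left_edge, dx):
        # Deposit particle mass onto a 1D grid with linear (CIC) weights.
        field = np.zeros(nbins, dtype='float64')
        x = (pos - left_edge) / dx - 0.5    # position in cell-center units
        i = np.floor(x).astype('int64')
        w = x - i                           # fraction given to the right cell
        for j in range(len(pos)):
            if 0 <= i[j] < nbins:
                field[i[j]] += mass[j] * (1.0 - w[j])
            if 0 <= i[j] + 1 < nbins:
                field[i[j] + 1] += mass[j] * w[j]
        return field

    # two unit-mass particles on a four-cell grid spanning [0, 4)
    print cic_deposit_1d(np.array([0.5, 1.75]), np.ones(2), 4, 0.0, 1.0)
    # -> [ 1.    0.75  0.25  0.  ]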

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -119,9 +119,13 @@
         files_keys = defaultdict(lambda: [])
         pf_field_list = grids[0].pf.h.field_list
         sets = [dset for dset in list(sets) if dset in pf_field_list]
-        for g in grids: files_keys[g.filename].append(g)
+        for g in grids:
+            files_keys[g.filename].append(g)
         exc = self._read_exception
         for file in files_keys:
+            # DM-only Enzo outputs are funny business: a grid can exist
+            # in the hierarchy yet have *no* data file, so skip it here.
+            if file is None: continue
             mylog.debug("Starting read %s (%s)", file, sets)
             nodes = [g.id for g in files_keys[file]]
             nodes.sort()

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -64,7 +64,6 @@
 translation_dict = {"x-velocity": "velx",
                     "y-velocity": "vely",
                     "z-velocity": "velz",
-                    "VelocityMagnitude": "velo",
                     "Density": "dens",
                     "Temperature": "temp",
                     "Pressure" : "pres", 

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -163,10 +163,12 @@
                         "angmomen_y",
                         "angmomen_z",
                         "mlast",
+                        "r",
                         "mdeut",
                         "n",
                         "mdot",
                         "burnstate",
+                        "luminosity",
                         "id"]
 
 for pf in _particle_field_list:
@@ -174,3 +176,18 @@
     add_field("particle_%s" % pf, function=pfunc,
               validators = [ValidateSpatial(0)],
               particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -44,6 +44,17 @@
         parses the Orion Star Particle text files
         
         """
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = grid.pf.fullplotdir + "/SinkParticles"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+        
+        # The basic fields that all sink particles have
         index = {'particle_mass': 0,
                  'particle_position_x': 1,
                  'particle_position_y': 2,
@@ -54,17 +65,39 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
+                 'particle_id': -1}
+
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Could not determine the particle output file format.')
+            mylog.warning('The resulting particle fields could be nonsense!')
 
         def read(line, field):
-            return float(line.split(' ')[index[field]])
+            return float(line.strip().split(' ')[index[field]])
 
-        fn = grid.pf.fullplotdir + "/StarParticles"
         with open(fn, 'r') as f:
             lines = f.readlines()
             particles = []

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/pluto/api.py
--- a/yt/frontends/pluto/api.py
+++ b/yt/frontends/pluto/api.py
@@ -38,4 +38,4 @@
       add_pluto_field
 
 from .io import \
-      IOHandlerChomboHDF5
+      IOHandlerPlutoHDF5

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/pluto/data_structures.py
--- a/yt/frontends/pluto/data_structures.py
+++ b/yt/frontends/pluto/data_structures.py
@@ -99,7 +99,7 @@
 
     grid = PlutoGrid
 
-    def __init__(self,pf,data_style='chombo_hdf5'):
+    def __init__(self,pf,data_style='pluto_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
         self.data_style = data_style
@@ -187,7 +187,7 @@
     _fieldinfo_fallback = PlutoFieldInfo
     _fieldinfo_known = KnownPlutoFields
 
-    def __init__(self, filename, data_style='chombo_hdf5',
+    def __init__(self, filename, data_style='pluto_hdf5',
                  storage_filename = None, ini_filename = None):
         self._handle = h5py.File(filename,'r')
         self.current_time = self._handle.attrs['time']

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/frontends/pluto/io.py
--- a/yt/frontends/pluto/io.py
+++ b/yt/frontends/pluto/io.py
@@ -31,8 +31,8 @@
 from yt.utilities.io_handler import \
            BaseIOHandler
 
-class IOHandlerChomboHDF5(BaseIOHandler):
-    _data_style = "chombo_hdf5"
+class IOHandlerPlutoHDF5(BaseIOHandler):
+    _data_style = "pluto_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -369,6 +369,20 @@
     if ytcfg.getint("yt", cfg_option) > 0: return
     return func(*args, **kwargs)
 
+def is_root():
+    """
+    Return True when called on the root processor of the topcomm
+    communicator, and False otherwise.
+    """
+    from yt.config import ytcfg
+    cfg_option = "__topcomm_parallel_rank"
+    if not ytcfg.getboolean("yt","__parallel"):
+        return True
+    if ytcfg.getint("yt", cfg_option) > 0: 
+        return False
+    return True
+
+
 #
 # Our signal and traceback handling functions
 #
@@ -603,17 +617,11 @@
     suffix = os.path.splitext(name)[1]
     return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
 
-def mkdir_rec(path):
-    """
-    Recursive mkdir, so that if you mkdir two levels deep and the first 
-    one doesn't exist, it creates the first, and then any subsequent dirs.
 
-    Examples
-    --------
-    mkdir_rec("a/b/c")
-    """
-    dir_list = path.split("/")
-    basedir = "."
-    for dir in dir_list:
-        basedir = "%s/%s" % (basedir, dir)
-        if not os.path.isdir(basedir): os.mkdir(basedir)
+def ensure_dir_exists(path):
+    r"""Create all directories in path recursively in a parallel safe manner"""
+    my_dir = os.path.dirname(path)
+    if not my_dir:
+        return
+    if not os.path.exists(my_dir):
+        only_on_root(os.makedirs, my_dir)
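Together these give scripts a simple parallel-safe output pattern: ensure_dir_exists creates the directory component of a path exactly once, on the root processor (via only_on_root), while is_root lets user code guard its own serial work explicitly. A brief usage sketch (the path is illustrative; note that ensure_dir_exists acts on os.path.dirname of its argument, hence the trailing slash):

    from yt.funcs import is_root, ensure_dir_exists

    ensure_dir_exists("frames/projections/")   # made once, on the root proc
    if is_root():
        print "root processor: writing the summary file"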

diff -r b4e05d998ce72f204c7be422f839535696234615 -r 2fac5fa490905ca97b82411a4f1815d61df026db yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -76,7 +76,8 @@
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), setup = False)
+        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), 
+                           setup = False, plot_type='SlicePlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]
@@ -96,7 +97,7 @@
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
         pw = PWViewerExtJS(proj, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]),
-                           setup = False)
+                           setup = False, plot_type='ProjectionPlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/59aa6445b5f4/
Changeset:   59aa6445b5f4
Branch:      stable
User:        MatthewTurk
Date:        2013-06-03 22:39:48
Summary:     Updating to 2.5.3.
Affected #:  1 file

diff -r 2fac5fa490905ca97b82411a4f1815d61df026db -r 59aa6445b5f4a26ecb2449f913c7f2b5fee04bee setup.py
--- a/setup.py
+++ b/setup.py
@@ -155,7 +155,7 @@
 
 import setuptools
 
-VERSION = "2.5.2"
+VERSION = "2.5.3"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')


https://bitbucket.org/yt_analysis/yt/commits/80de2ef3fe6d/
Changeset:   80de2ef3fe6d
Branch:      stable
User:        MatthewTurk
Date:        2013-06-03 22:39:58
Summary:     Added tag yt-2.5.3 for changeset 59aa6445b5f4
Affected #:  1 file

diff -r 59aa6445b5f4a26ecb2449f913c7f2b5fee04bee -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5165,3 +5165,4 @@
 bd285a9a8a643ebb7b47b543e9343da84cd294c5 yt-2.5
 34a5e6774ceb26896c9d767563951d185a720774 yt-2.5.1
 2197c101413723de13e1d0dea153b182342ff719 yt-2.5.2
+59aa6445b5f4a26ecb2449f913c7f2b5fee04bee yt-2.5.3

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


