[yt-svn] commit/yt: 18 new changesets

Bitbucket commits-reply at bitbucket.org
Tue Feb 12 10:15:24 PST 2013


18 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/5a8f036ac9a4/
changeset:   5a8f036ac9a4
branch:      yt
user:        sskory
date:        2012-12-07 22:03:04
summary:     Removing time delay from halo mass function.
affected #:  1 file

diff -r 939cff5bd00cba49a175f99547fe27d3e416691a -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -132,7 +132,6 @@
         not stored in enzo datasets, so must be entered by hand.
         sigma8input=%f primordial_index=%f omega_baryon0=%f
         """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
-        time.sleep(1)
         
         # Do the calculations.
         self.sigmaM()


https://bitbucket.org/yt_analysis/yt/commits/2f5b028c4986/
changeset:   2f5b028c4986
branch:      yt
user:        sskory
date:        2012-12-07 22:03:24
summary:     Merge.
affected #:  13 files

diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -458,7 +458,7 @@
 get_ytproject nose-1.2.1.tar.gz 
 get_ytproject python-hglib-0.2.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
-get_ytproject Rockstar-0.99.tar.gz
+get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]

diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,8 @@
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
     gold_standard_filename = 'gold003',
-    local_standard_filename = 'local001'
+    local_standard_filename = 'local001',
+    sketchfab_api_key = 'None'
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten

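The new sketchfab_api_key option defaults to the string 'None'.  As a
minimal sketch (the key below is a placeholder, not a real token), it can
be set in ~/.yt/config and read back through ytcfg, matching the lookup
used later in this changeset:

    # In ~/.yt/config:
    #   [yt]
    #   sketchfab_api_key = YOUR_KEY_HERE   # placeholder
    from yt.config import ytcfg
    api_key = ytcfg.get("yt", "sketchfab_api_key")
    if api_key in (None, "None"):
        print "No SketchFab API key configured."
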
diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -38,6 +38,7 @@
 import cStringIO
 
 from yt.funcs import *
+from yt.config import ytcfg
 
 from yt.data_objects.derived_quantities import GridChildMaskWrapper
 from yt.data_objects.particle_io import particle_handler_registry
@@ -868,9 +869,11 @@
         else:
             self.fields = ensure_list(fields)
         from yt.visualization.plot_window import \
-            GetBoundsAndCenter, PWViewerMPL
+            GetWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
@@ -3809,7 +3812,9 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [grid[field].astype("float64") for field in fields]
+        g_fields = [gf.astype("float64") 
+                    if gf.dtype != "float64"
+                    else gf for gf in (grid[field] for field in fields)]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
@@ -3980,8 +3985,9 @@
 
     @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
-        fields = ensure_list(fields)
-        g_fields = [grid[field].astype("float64") for field in fields]
+        g_fields = [gf.astype("float64") 
+                    if gf.dtype != "float64"
+                    else gf for gf in (grid[field] for field in fields)]
         c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
@@ -4214,12 +4220,13 @@
         self.data_source = data_source
         self.surface_field = surface_field
         self.field_value = field_value
+        self.vertex_samples = YTFieldData()
         center = data_source.get_field_parameter("center")
         AMRData.__init__(self, center = center, fields = None, pf =
                          data_source.pf)
         self._grids = self.data_source._grids.copy()
 
-    def get_data(self, fields = None):
+    def get_data(self, fields = None, sample_type = "face"):
         if isinstance(fields, list) and len(fields) > 1:
             for field in fields: self.get_data(field)
             return
@@ -4234,7 +4241,7 @@
             pb.update(i)
             my_verts = self._extract_isocontours_from_grid(
                             g, self.surface_field, self.field_value,
-                            fields)
+                            fields, sample_type)
             if fields is not None:
                 my_verts, svals = my_verts
                 samples.append(svals)
@@ -4247,19 +4254,25 @@
             samples = np.concatenate(samples)
             samples = self.comm.par_combine_object(samples, op='cat',
                                 datatype='array')
-            self[fields] = samples
+            if sample_type == "face":
+                self[fields] = samples
+            elif sample_type == "vertex":
+                self.vertex_samples[fields] = samples
+        
 
     @restore_grid_state
     def _extract_isocontours_from_grid(self, grid, field, value,
-                                       sample_values = None):
+                                       sample_values = None,
+                                       sample_type = "face"):
         mask = self.data_source._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(field, no_ghost = False)
         if sample_values is not None:
             svals = grid.get_vertex_centered_data(sample_values)
         else:
             svals = None
+        sample_type = {"face":1, "vertex":2}[sample_type]
         my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
-                                    grid.dds, svals)
+                                    grid.dds, svals, sample_type)
         return my_verts
 
     def calculate_flux(self, field_x, field_y, field_z, fluxing_field = None):
@@ -4343,7 +4356,7 @@
                     ff, mask, grid.LeftEdge, grid.dds)
 
     def export_ply(self, filename, bounds = None, color_field = None,
-                   color_map = "algae", color_log = True):
+                   color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization
         in many different programs (e.g., MeshLab).
 
@@ -4374,63 +4387,208 @@
         >>> surf.export_ply("my_galaxy.ply", bounds = bounds)
         """
         if self.vertices is None:
-            self.get_data(color_field)
-        elif color_field is not None and color_field not in self.field_data:
-            self[color_field]
-        self._export_ply(filename, bounds, color_field, color_map, color_log)
+            self.get_data(color_field, sample_type)
+        elif color_field is not None:
+            if sample_type == "face" and \
+                color_field not in self.field_data:
+                self[color_field]
+            elif sample_type == "vertex" and \
+                color_field not in self.vertex_samples:
+                self.get_data(color_field, sample_type)
+        self._export_ply(filename, bounds, color_field, color_map, color_log,
+                         sample_type)
+
+    def _color_samples(self, cs, color_log, color_map, arr):
+            if color_log: cs = np.log10(cs)
+            mi, ma = cs.min(), cs.max()
+            cs = (cs - mi) / (ma - mi)
+            from yt.visualization.image_writer import map_to_colors
+            cs = map_to_colors(cs, color_map)
+            arr["red"][:] = cs[0,:,0]
+            arr["green"][:] = cs[0,:,1]
+            arr["blue"][:] = cs[0,:,2]
 
     @parallel_root_only
     def _export_ply(self, filename, bounds = None, color_field = None,
-                   color_map = "algae", color_log = True):
-        f = open(filename, "wb")
+                   color_map = "algae", color_log = True, sample_type = "face"):
+        if isinstance(filename, file):
+            f = filename
+        else:
+            f = open(filename, "wb")
         if bounds is None:
             DLE = self.pf.domain_left_edge
             DRE = self.pf.domain_right_edge
             bounds = [(DLE[i], DRE[i]) for i in range(3)]
+        nv = self.vertices.shape[1]
+        vs = [("x", "<f"), ("y", "<f"), ("z", "<f"),
+              ("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
         fs = [("ni", "uint8"), ("v1", "<i4"), ("v2", "<i4"), ("v3", "<i4"),
               ("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
-        v = np.empty((self.vertices.shape[1], 3), "<f")
-        for i in range(3):
-            v[:,i] = self.vertices[i,:]
-            np.subtract(v[:,i], bounds[i][0], v[:,i])
-            w = bounds[i][1] - bounds[i][0]
-            np.divide(v[:,i], w, v[:,i])
-            np.subtract(v[:,i], 0.5, v[:,i]) # Center at origin.
         f.write("ply\n")
         f.write("format binary_little_endian 1.0\n")
-        f.write("element vertex %s\n" % (v.shape[0]))
+        f.write("element vertex %s\n" % (nv))
         f.write("property float x\n")
         f.write("property float y\n")
         f.write("property float z\n")
-        f.write("element face %s\n" % (v.shape[0]/3))
+        if color_field is not None and sample_type == "vertex":
+            f.write("property uchar red\n")
+            f.write("property uchar green\n")
+            f.write("property uchar blue\n")
+            v = np.empty(self.vertices.shape[1], dtype=vs)
+            cs = self.vertex_samples[color_field]
+            self._color_samples(cs, color_log, color_map, v)
+        else:
+            v = np.empty(self.vertices.shape[1], dtype=vs[:3])
+        f.write("element face %s\n" % (nv/3))
         f.write("property list uchar int vertex_indices\n")
-        if color_field is not None:
+        if color_field is not None and sample_type == "face":
             f.write("property uchar red\n")
             f.write("property uchar green\n")
             f.write("property uchar blue\n")
             # Now we get our samples
             cs = self[color_field]
-            if color_log: cs = np.log10(cs)
-            mi, ma = cs.min(), cs.max()
-            cs = (cs - mi) / (ma - mi)
-            from yt.visualization.image_writer import map_to_colors
-            cs = map_to_colors(cs, color_map)
-            arr = np.empty(cs.shape[1], dtype=np.dtype(fs))
-            arr["red"][:] = cs[0,:,0]
-            arr["green"][:] = cs[0,:,1]
-            arr["blue"][:] = cs[0,:,2]
+            arr = np.empty(cs.shape[0], dtype=np.dtype(fs))
+            self._color_samples(cs, color_log, color_map, arr)
         else:
-            arr = np.empty(v.shape[0]/3, np.dtype(fs[:-3]))
+            arr = np.empty(nv/3, np.dtype(fs[:-3]))
+        for i, ax in enumerate("xyz"):
+            # Do the bounds first since we cast to f32
+            tmp = self.vertices[i,:]
+            np.subtract(tmp, bounds[i][0], tmp)
+            w = bounds[i][1] - bounds[i][0]
+            np.divide(tmp, w, tmp)
+            np.subtract(tmp, 0.5, tmp) # Center at origin.
+            v[ax][:] = tmp 
         f.write("end_header\n")
         v.tofile(f)
         arr["ni"][:] = 3
-        vi = np.arange(v.shape[0], dtype="<i")
-        vi.shape = (v.shape[0]/3, 3)
+        vi = np.arange(nv, dtype="<i")
+        vi.shape = (nv/3, 3)
         arr["v1"][:] = vi[:,0]
         arr["v2"][:] = vi[:,1]
         arr["v3"][:] = vi[:,2]
         arr.tofile(f)
-        f.close()
+        if filename is not f:
+            f.close()
+
+    def export_sketchfab(self, title, description, api_key = None,
+                            color_field = None, color_map = "algae",
+                            color_log = True, bounds = None):
+        r"""This exports Surfaces to SketchFab.com, where they can be viewed
+        interactively in a web browser.
+
+        SketchFab.com is a proprietary web service that provides WebGL
+        rendering of models.  This routine will use temporary files to
+        construct a compressed binary representation (in .PLY format) of the
+        Surface and any optional fields you specify and upload it to
+        SketchFab.com.  It requires an API key, which can be found on your
+        SketchFab.com dashboard.  You can either supply the API key to this
+        routine directly or you can place it in the variable
+        "sketchfab_api_key" in your ~/.yt/config file.  This function is
+        parallel-safe.
+
+        Parameters
+        ----------
+        title : string
+            The title for the model on the website
+        description : string
+            How you want the model to be described on the website
+        api_key : string
+            Optional; defaults to using the one in the config file
+        color_field : string
+            If specified, the field by which the surface will be colored
+        color_map : string
+            The name of the color map to use to map the color field
+        color_log : bool
+            Should the field be logged before being mapped to RGB?
+        bounds : list of tuples
+            [ (xmin, xmax), (ymin, ymax), (zmin, zmax) ] within which the model
+            will be scaled and centered.  Defaults to the full domain.
+
+        Returns
+        -------
+        URL : string
+            The URL at which your model can be viewed.
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+        >>> pf = load("redshift0058")
+        >>> dd = pf.h.sphere("max", (200, "kpc"))
+        >>> rho = 5e-27
+        >>> bounds = [(dd.center[i] - 100.0/pf['kpc'],
+        ...            dd.center[i] + 100.0/pf['kpc']) for i in range(3)]
+        >>> surf = pf.h.surface(dd, "Density", rho)
+        >>> rv = surf.export_sketchfab(
+        ...     title = "Testing Upload",
+        ...     description = "A simple test of the uploader",
+        ...     color_field = "Temperature",
+        ...     color_map = "hot",
+        ...     color_log = True,
+        ...     bounds = bounds)
+        """
+        api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
+        if api_key in (None, "None"):
+            raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")
+        import zipfile, json
+        from tempfile import TemporaryFile
+
+        ply_file = TemporaryFile()
+        self.export_ply(ply_file, bounds, color_field, color_map, color_log,
+                        sample_type = "vertex")
+        ply_file.seek(0)
+        # Greater than ten million vertices and we throw an error but dump
+        # to a file.
+        if self.vertices.shape[1] > 1e7:
+            tfi = 0
+            fn = "temp_model_%03i.ply" % tfi
+            while os.path.exists(fn):
+                fn = "temp_model_%03i.ply" % tfi
+                tfi += 1
+            open(fn, "wb").write(ply_file.read())
+            raise YTTooManyVertices(self.vertices.shape[1], fn)
+
+        zfs = TemporaryFile()
+        with zipfile.ZipFile(zfs, "w", zipfile.ZIP_DEFLATED) as zf:
+            zf.writestr("yt_export.ply", ply_file.read())
+        zfs.seek(0)
+
+        zfs.seek(0)
+        data = {
+            'title': title,
+            'token': api_key,
+            'description': description,
+            'fileModel': zfs,
+            'filenameModel': "yt_export.zip",
+        }
+        upload_id = self._upload_to_sketchfab(data)
+        upload_id = self.comm.mpi_bcast(upload_id, root = 0)
+        return upload_id
+
+    @parallel_root_only
+    def _upload_to_sketchfab(self, data):
+        import urllib2, json
+        from yt.utilities.poster.encode import multipart_encode
+        from yt.utilities.poster.streaminghttp import register_openers
+        register_openers()
+        datamulti, headers = multipart_encode(data)
+        request = urllib2.Request("https://api.sketchfab.com/v1/models",
+                        datamulti, headers)
+        rv = urllib2.urlopen(request).read()
+        rv = json.loads(rv)
+        upload_id = rv.get("result", {}).get("id", None)
+        if upload_id:
+            mylog.info("Model uploaded to: https://sketchfab.com/show/%s",
+                       upload_id)
+        else:
+            mylog.error("Problem uploading.")
+        return upload_id
+
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]

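Taken together, the data_containers.py changes above add vertex-sampled
coloring to surface exports.  A short usage sketch, reusing the
illustrative dataset and field names from the export_sketchfab docstring:

    from yt.mods import *
    pf = load("redshift0058")                  # illustrative dataset name
    dd = pf.h.sphere("max", (200, "kpc"))
    surf = pf.h.surface(dd, "Density", 5e-27)
    # New in this changeset: sample the color field at each vertex
    # instead of once per face center.
    surf.export_ply("my_galaxy.ply", color_field = "Temperature",
                    sample_type = "vertex")
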
diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -153,7 +153,7 @@
         """
         Returns a single field.  Will add if necessary.
         """
-        if not self.field_data.has_key(key):
+        if key not in self.field_data:
             self.get_data(key)
         return self.field_data[key]
 

diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/data_objects/tests/test_fluxes.py
--- a/yt/data_objects/tests/test_fluxes.py
+++ b/yt/data_objects/tests/test_fluxes.py
@@ -12,3 +12,10 @@
     flux = surf.calculate_flux("Ones", "Zeros", "Zeros", "Ones")
     yield assert_almost_equal, flux, 1.0, 12
 
+def test_sampling():
+    pf = fake_random_pf(64, nprocs = 4)
+    dd = pf.h.all_data()
+    for i, ax in enumerate('xyz'):
+        surf = pf.h.surface(dd, ax, 0.51)
+        surf.get_data(ax, "vertex")
+        yield assert_equal, surf.vertex_samples[ax], surf.vertices[i,:]

diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,9 +1,14 @@
 from yt.testing import *
+import os
 
 def setup():
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
 def test_projection():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
@@ -22,6 +27,7 @@
             xax = x_dict[ax]
             yax = y_dict[ax]
             for wf in ["Density", None]:
+                fns = []
                 proj = pf.h.proj(ax, ["Ones", "Density"], weight_field = wf)
                 yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
                 yield assert_equal, proj["Ones"].min(), 1.0
@@ -30,6 +36,8 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                pw = proj.to_pw()
+                fns += pw.save()
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
                     yield assert_equal, frb[proj_field].info['data_source'], \
@@ -50,6 +58,7 @@
                             proj.center
                     yield assert_equal, frb[proj_field].info['weight_field'], \
                             wf
+                teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()

diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -1,9 +1,14 @@
 from yt.testing import *
+import os
 
 def setup():
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
 def test_slice():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
@@ -21,6 +26,7 @@
             xax = x_dict[ax]
             yax = y_dict[ax]
             for wf in ["Density", None]:
+                fns = []
                 slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
                 yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
                 yield assert_equal, slc["Ones"].min(), 1.0
@@ -29,6 +35,8 @@
                 yield assert_equal, np.unique(slc["py"]), uc[yax]
                 yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                pw = slc.to_pw()
+                fns += pw.save()
                 frb = slc.to_frb((1.0,'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \
@@ -49,7 +57,7 @@
                             slc.center
                     yield assert_equal, frb[slc_field].info['coord'], \
                             slc_pos
+                teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
 
-

diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -181,8 +181,10 @@
         mylog.debug("Finished read of %s", sets)
 
     def _read_data_set(self, grid, field):
-        return self.modify(hdf5_light_reader.ReadData(grid.filename,
-                "/Grid%08i/%s" % (grid.id, field)))
+        tr = hdf5_light_reader.ReadData(grid.filename,
+                "/Grid%08i/%s" % (grid.id, field))
+        if tr.dtype == "float32": tr = tr.astype("float64")
+        return self.modify(tr)
 
     def _read_data_slice(self, grid, field, axis, coord):
         axis = _axis_ids[axis]

diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -189,3 +189,22 @@
     def __str__(self):
         return "Enzo test output file (OutputLog) not generated for: " + \
             "'%s'" % (self.testname) + ".\nTest did not complete."
+
+class YTNoAPIKey(YTException):
+    def __init__(self, service, config_name):
+        self.service = service
+        self.config_name = config_name
+
+    def __str__(self):
+        return "You need to set an API key for %s in ~/.yt/config as %s" % (
+            self.service, self.config_name)
+
+class YTTooManyVertices(YTException):
+    def __init__(self, nv, fn):
+        self.nv = nv
+        self.fn = fn
+
+    def __str__(self):
+        s = "There are too many vertices (%s) to upload to Sketchfab. " % (self.nv)
+        s += "Your model has been saved as %s .  You should upload manually." % (self.fn)
+        return s

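A sketch of how calling code might handle the two new exceptions (Python 2
syntax to match the codebase; the dataset name is illustrative):

    from yt.mods import *
    from yt.utilities.exceptions import YTNoAPIKey, YTTooManyVertices
    pf = load("redshift0058")                  # illustrative dataset name
    surf = pf.h.surface(pf.h.all_data(), "Density", 5e-27)
    try:
        url = surf.export_sketchfab(title = "Test", description = "test")
    except YTNoAPIKey:
        print "Set sketchfab_api_key in ~/.yt/config first."
    except YTTooManyVertices, err:
        # The model was dumped to a local .ply file for manual upload.
        print "Too many vertices (%s); saved to %s" % (err.nv, err.fn)
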
diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/utilities/lib/marching_cubes.pyx
--- a/yt/utilities/lib/marching_cubes.pyx
+++ b/yt/utilities/lib/marching_cubes.pyx
@@ -33,7 +33,7 @@
 cdef struct Triangle:
     Triangle *next
     np.float64_t p[3][3]
-    np.float64_t val
+    np.float64_t val[3] # Usually only use one value
 
 cdef struct TriangleCollection:
     int count
@@ -64,12 +64,14 @@
     return count
 
 cdef void FillTriangleValues(np.ndarray[np.float64_t, ndim=1] values,
-                             Triangle *first):
+                             Triangle *first, int nskip = 1):
     cdef Triangle *this = first
     cdef Triangle *last
     cdef int i = 0
+    cdef int j
     while this != NULL:
-        values[i] = this.val
+        for j in range(nskip):
+            values[i*nskip + j] = this.val[j]
         i += 1
         last = this
         this = this.next
@@ -463,7 +465,7 @@
                      np.ndarray[np.int32_t, ndim=3] mask,
                      np.ndarray[np.float64_t, ndim=1] left_edge,
                      np.ndarray[np.float64_t, ndim=1] dxs,
-                     obj_sample = None):
+                     obj_sample = None, int sample_type = 1):
     cdef int dims[3]
     cdef int i, j, k, n, m, nt
     cdef int offset
@@ -478,7 +480,7 @@
     if obj_sample is not None:
         sample = obj_sample
         sdata = <np.float64_t *> sample.data
-        do_sample = 1
+        do_sample = sample_type # 1 for face, 2 for vertex
     else:
         do_sample = 0
     for i in range(3):
@@ -502,13 +504,16 @@
                     offset_fill(dims, intdata, gv)
                     nt = march_cubes(gv, isovalue, dds, pos[0], pos[1], pos[2],
                                 &triangles)
-                    if do_sample == 1 and nt > 0:
+                    if nt == 0 or do_sample == 0:
+                        pos[2] += dds[2]
+                        continue
+                    if last == NULL and triangles.first != NULL:
+                        current = triangles.first
+                        last = NULL
+                    elif last != NULL:
+                        current = last.next
+                    if do_sample == 1:
                         # At each triangle's center, sample our secondary field
-                        if last == NULL and triangles.first != NULL:
-                            current = triangles.first
-                            last = NULL
-                        elif last != NULL:
-                            current = last.next
                         while current != NULL:
                             for n in range(3):
                                 point[n] = 0.0
@@ -517,24 +522,38 @@
                                     point[m] += (current.p[n][m]-pos[m])*idds[m]
                             for n in range(3):
                                 point[n] /= 3.0
-                            current.val = offset_interpolate(dims, point,
+                            current.val[0] = offset_interpolate(dims, point,
                                                              sdata + offset)
                             last = current
                             if current.next == NULL: break
                             current = current.next
+                    elif do_sample == 2:
+                        while current != NULL:
+                            for n in range(3):
+                                for m in range(3):
+                                    point[m] = (current.p[n][m]-pos[m])*idds[m]
+                                current.val[n] = offset_interpolate(dims,
+                                                    point, sdata + offset)
+                            last = current
+                            if current.next == NULL: break
+                            current = current.next
                 pos[2] += dds[2]
             pos[1] += dds[1]
         pos[0] += dds[0]
     # Hallo, we are all done.
     cdef np.ndarray[np.float64_t, ndim=2] vertices 
     vertices = np.zeros((triangles.count*3,3), dtype='float64')
+    if do_sample == 0:
+        FillAndWipeTriangles(vertices, triangles.first)
+    cdef int nskip
     if do_sample == 1:
-        sampled = np.zeros(triangles.count, dtype='float64')
-        FillTriangleValues(sampled, triangles.first)
-        FillAndWipeTriangles(vertices, triangles.first)
-        return vertices, sampled
+        nskip = 1
+    elif do_sample == 2:
+        nskip = 3
+    sampled = np.zeros(triangles.count * nskip, dtype='float64')
+    FillTriangleValues(sampled, triangles.first, nskip)
     FillAndWipeTriangles(vertices, triangles.first)
-    return vertices
+    return vertices, sampled
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

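The marching cubes changes thread the sample_type flag down to the
sampler: do_sample == 1 interpolates the secondary field once per
triangle centroid (nskip = 1), while do_sample == 2 interpolates at all
three vertices (nskip = 3), so the returned sampled array has one or
three entries per triangle.  A self-contained numpy sketch of lining the
two return values up (the arrays here are stand-ins for what
march_cubes_grid returns with vertex sampling):

    import numpy as np
    ntri = 4                                  # pretend 4 triangles were found
    vertices = np.random.random((3 * ntri, 3))
    sampled = np.random.random(3 * ntri)      # one value per vertex
    per_vertex = sampled.reshape((ntri, 3))   # row i: triangle i's 3 samples
    per_face = per_vertex.mean(axis=1)        # collapse back to face values
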
diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -252,9 +252,10 @@
     @wraps(func)
     def root_only(*args, **kwargs):
         comm = _get_comm(args)
+        rv = None
         if comm.rank == 0:
             try:
-                func(*args, **kwargs)
+                rv = func(*args, **kwargs)
                 all_clear = 1
             except:
                 traceback.print_last()
@@ -263,6 +264,7 @@
             all_clear = None
         all_clear = comm.mpi_bcast(all_clear)
         if not all_clear: raise RuntimeError
+        return rv
     if parallel_capable: return root_only
     return func
 

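With this change, a function wrapped in @parallel_root_only now
propagates its return value on the root rank; other ranks still receive
None, so callers broadcast the result afterwards (as export_sketchfab
does with mpi_bcast).  A minimal sketch; in serial runs the decorator is
a pass-through:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        parallel_root_only

    @parallel_root_only
    def announce():
        # Runs on rank 0 only; its return value is now passed through.
        return "done"

    rv = announce()   # "done" on rank 0 (and in serial), None elsewhere
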
diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -36,7 +36,6 @@
 from yt.data_objects.profiles import \
     BinnedProfile1D, \
     BinnedProfile2D
-from .plot_types import ProfilePlot, PhasePlot
 from .tick_locators import LogLocator, LinearLocator
 from yt.utilities.logger import ytLogger as mylog
 

diff -r 5a8f036ac9a4788d5eafa65a57d026b63a4825bf -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -1,7 +1,7 @@
 from yt.testing import *
 from yt.mods import SlicePlot, ProjectionPlot, \
     OffAxisSlicePlot, OffAxisProjectionPlot
-import glob, os
+import os
 
 def setup():
     from yt.config import ytcfg


https://bitbucket.org/yt_analysis/yt/commits/f0ccebb68031/
changeset:   f0ccebb68031
branch:      yt
user:        sskory
date:        2012-12-13 17:44:23
summary:     Merge.
affected #:  9 files

diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -61,7 +61,7 @@
     ipython_notebook = 'False',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold003',
+    gold_standard_filename = 'gold004',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -280,7 +280,7 @@
             return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
-    def get_field_parameter(self, param):
+    def get_field_parameter(self, param, default = None):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
             return np.random.random(3) * 1e-2

diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,9 +258,12 @@
         ...     SlicePlot(pf, "x", "Density").save()
 
         """
+        
         if isinstance(filenames, types.StringTypes):
             filenames = glob.glob(filenames)
             filenames.sort()
+        if len(filenames) == 0:
+            raise YTOutputNotIdentified(filenames, {})
         obj = cls(filenames[:], parallel = parallel, **kwargs)
         return obj
 

diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -450,6 +450,8 @@
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
     def __del__(self):
+        if self._handle is not self._particle_handle:
+            self._particle_handle.close()
         self._handle.close()
 
     @classmethod

diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -248,6 +248,8 @@
             fileh = h5py.File(args[0],'r')
             if "gridded_data_format" in fileh:
+                fileh.close()
                 return True
+            fileh.close()
         except:
             pass
         return False

diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -591,19 +591,6 @@
             print
             loki = raw_input("Press enter to go on, Ctrl-C to exit.")
             cedit.config.setoption(uu, hgrc_path, "bb.username=%s" % bbusername)
-        bb_fp = "81:2b:08:90:dc:d3:71:ee:e0:7c:b4:75:ce:9b:6c:48:94:56:a1:fe"
-        if uu.config("hostfingerprints", "bitbucket.org", None) is None:
-            print "Let's also add bitbucket.org to the known hosts, so hg"
-            print "doesn't warn us about bitbucket."
-            print "We will add this:"
-            print
-            print "   [hostfingerprints]"
-            print "   bitbucket.org = %s" % (bb_fp)
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            cedit.config.setoption(uu, hgrc_path,
-                                   "hostfingerprints.bitbucket.org=%s" % bb_fp)
-
         # We now reload the UI's config file so that it catches the [bb]
         # section changes.
         uu.readconfig(hgrc_path[0])

diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -330,7 +330,8 @@
         ParticleGenerator.__init__(self, pf, num_particles, field_list)
 
         num_cells = len(data_source["x"].flat)
-        max_density = data_source[density_field].max()
+        max_mass = (data_source[density_field]*
+                    data_source["CellVolume"]).max()
         num_particles_left = num_particles
         all_x = []
         all_y = []
@@ -341,12 +342,13 @@
         
         while num_particles_left > 0:
 
-            rho = np.random.uniform(high=1.01*max_density,
-                                    size=num_particles_left)
+            m = np.random.uniform(high=1.01*max_mass,
+                                  size=num_particles_left)
             idxs = np.random.random_integers(low=0, high=num_cells-1,
                                              size=num_particles_left)
-            rho_true = data_source[density_field].flat[idxs]
-            accept = rho <= rho_true
+            m_true = (data_source[density_field]*
+                      data_source["CellVolume"]).flat[idxs]
+            accept = m <= m_true
             num_accepted = accept.sum()
             accepted_idxs = idxs[accept]
             

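The particle_generator.py change switches the rejection test from
density to cell mass (density times CellVolume), so sampled particle
counts trace mass rather than density when cell volumes differ across
AMR levels.  A self-contained numpy sketch of the same rejection loop on
made-up flat arrays:

    import numpy as np
    rho = np.random.random(1000)              # stand-in cell densities
    vol = np.random.random(1000)              # stand-in cell volumes
    mass = rho * vol
    max_mass = 1.01 * mass.max()
    accepted = []
    while len(accepted) < 100:
        idxs = np.random.randint(0, mass.size, size=100)
        m = np.random.uniform(high=max_mass, size=100)
        accepted.extend(idxs[m <= mass[idxs]].tolist())
    accepted = accepted[:100]                 # indices of the sampled cells
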
diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -658,24 +658,25 @@
 
         xf = axis_names[px_index]
         yf = axis_names[py_index]
+        dxf = "d%s" % xf
+        dyf = "d%s" % yf
 
         DomainRight = plot.data.pf.domain_right_edge
         DomainLeft = plot.data.pf.domain_left_edge
         DomainWidth = DomainRight - DomainLeft
-        
+
         nx, ny = plot.image._A.shape
         buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
-
             xf_copy = clump[xf].copy()
             yf_copy = clump[yf].copy()
-            
-            temp = _MPL.Pixelize(xf_copy, yf_copy, 
-                                 clump['dx']/2.0,
-                                 clump['dy']/2.0,
-                                 clump['dx']*0.0+i+1, # inits inside Pixelize
+
+            temp = _MPL.Pixelize(xf_copy, yf_copy,
+                                 clump[dxf]/2.0,
+                                 clump[dyf]/2.0,
+                                 clump[dxf]*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)

diff -r 2f5b028c4986ab3c997b169ff4ba29b454fb7388 -r f0ccebb68031035e4222a351850072a19bf6fb09 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1059,7 +1059,7 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        slc = pf.h.slice(axis, center[axis], fields=fields)
+        slc = pf.h.slice(axis, center[axis], center=center, fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
 


https://bitbucket.org/yt_analysis/yt/commits/e82d96149133/
changeset:   e82d96149133
branch:      yt
user:        sskory
date:        2012-12-15 00:37:04
summary:     Merge.
affected #:  4 files

diff -r f0ccebb68031035e4222a351850072a19bf6fb09 -r e82d961491336f9c079fd974af9ed5aa461d1f37 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -399,6 +399,14 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function get_ytdata
+{
+    echo "Downloading $1 from yt-project.org"
+    [ -e $1 ] && return
+    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -407,6 +415,13 @@
     exit 1
 fi
 
+# Get supplemental data.
+
+mkdir -p ${DEST_DIR}/data
+cd ${DEST_DIR}/data
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
+get_ytdata xray_emissivity.h5
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
@@ -706,7 +721,7 @@
 then
     if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar] && tar xfz rockstar-0.99.6.tar.gz
+        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
         echo "Building Rockstar"
         cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit

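get_ytdata mirrors get_ytproject: fetch a file from yt-project.org and
verify it against the published sha512 line.  The same check written in
Python, for reference (the digest is the one given above):

    import hashlib
    expected = ("de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47"
                "ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410")
    with open("xray_emissivity.h5", "rb") as f:
        digest = hashlib.sha512(f.read()).hexdigest()
    assert digest == expected, "checksum mismatch; re-download the file"
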
diff -r f0ccebb68031035e4222a351850072a19bf6fb09 -r e82d961491336f9c079fd974af9ed5aa461d1f37 yt/analysis_modules/spectral_integrator/api.py
--- a/yt/analysis_modules/spectral_integrator/api.py
+++ b/yt/analysis_modules/spectral_integrator/api.py
@@ -30,4 +30,8 @@
 
 from .spectral_frequency_integrator import \
     SpectralFrequencyIntegrator, \
-    create_table_from_textfiles
+    create_table_from_textfiles, \
+    EmissivityIntegrator, \
+    add_xray_emissivity_field, \
+    add_xray_luminosity_field, \
+    add_xray_photon_emissivity_field

diff -r f0ccebb68031035e4222a351850072a19bf6fb09 -r e82d961491336f9c079fd974af9ed5aa461d1f37 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -4,9 +4,11 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittons at origins.colorado.edu>
+Affiliation: Michigan State University
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2007-2012 Matthew Turk.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -24,16 +26,20 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+from exceptions import IOError
+import h5py
 import numpy as np
+import os
 
 from yt.funcs import *
 
 from yt.data_objects.field_info_container import add_field
+from yt.utilities.exceptions import YTException
 from yt.utilities.linear_interpolators import \
-    UnilinearFieldInterpolator, \
-    BilinearFieldInterpolator, \
-    TrilinearFieldInterpolator
+    BilinearFieldInterpolator
 
+xray_data_version = 1
+    
 class SpectralFrequencyIntegrator(object):
     def __init__(self, table, field_names,
                  bounds, ev_bounds):
@@ -80,8 +86,8 @@
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
-                        units=r"\rm{ergs}\/\rm{cm}^{-3}\/\rm{s}^{-1}",
-                        projected_units=r"\rm{ergs}\/\rm{cm}^{-2}\/\rm{s}^{-1}")
+                        units=r"\rm{ergs}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+                        projected_units=r"\rm{ergs}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
         return name
 
 def create_table_from_textfiles(pattern, rho_spec, e_spec, T_spec):
@@ -100,3 +106,304 @@
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]
     return table
 
+class EnergyBoundsException(YTException):
+    def __init__(self, lower, upper):
+        self.lower = lower
+        self.upper = upper
+
+    def __str__(self):
+        return "Energy bounds are %e to %e keV." % \
+          (self.lower, self.upper)
+
+class ObsoleteDataException(YTException):
+    def __str__(self):
+        return "X-ray emissivity data is out of data.\nDownload the latest data from http://yt-project.org/data/xray_emissivity.h5 and move it to %s." % \
+          os.path.join(os.environ["YT_DEST"], "data", "xray_emissivity.h5")
+          
+class EmissivityIntegrator(object):
+    r"""Class for making X-ray emissivity fields with hdf5 data tables 
+    from Cloudy.
+    """
+    def __init__(self, filename=None):
+        r"""Initialize an EmissivityIntegrator object.
+
+        Keyword Parameters
+        ------------------
+        filename: string
+            Path to data file containing emissivity values.  If None,
+            a file called xray_emissivity.h5 is used.  This file contains 
+            emissivity tables for primordial elements and for metals at 
+            solar metallicity for the energy range 0.1 to 100 keV.
+            Default: None.
+            
+        """
+
+        default_filename = False
+        if filename is None:
+            filename = os.path.join(os.environ["YT_DEST"], 
+                                    "data", "xray_emissivity.h5")
+            default_filename = True
+
+        if not os.path.exists(filename):
+            raise IOError("File does not exist: %s." % filename)
+        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
+        in_file = h5py.File(filename, "r")
+        if "info" in in_file.attrs:
+            only_on_root(mylog.info, in_file.attrs["info"])
+        if default_filename and \
+          in_file.attrs["version"] < xray_data_version:
+            raise ObsoleteDataException()
+        else:
+            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
+                         in_file.attrs["version"])
+
+        for field in ["emissivity_primordial", "emissivity_metals",
+                      "log_nH", "log_T", "log_E"]:
+            setattr(self, field, in_file[field][:])
+        in_file.close()
+
+        E_diff = np.diff(self.log_E)
+        self.E_bins = \
+                  np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
+                                               [self.log_E[-1] - 0.5 * E_diff[-1],
+                                                self.log_E[-1] + 0.5 * E_diff[-1]]]))
+        self.dnu = 2.41799e17 * np.diff(self.E_bins)
+
+    def _get_interpolator(self, data, e_min, e_max):
+        r"""Create an interpolator for total emissivity in a 
+        given energy range.
+
+        Parameters
+        ----------
+        e_min: float
+            the minimum energy in keV for the energy band.
+        e_max: float
+            the maximum energy in keV for the energy band.
+
+        """
+        if (e_min - self.E_bins[0]) / e_min < -1e-3 or \
+          (e_max - self.E_bins[-1]) / e_max > 1e-3:
+            raise EnergyBoundsException(np.power(10, self.E_bins[0]),
+                                        np.power(10, self.E_bins[-1]))
+        e_is, e_ie = np.digitize([e_min, e_max], self.E_bins)
+        e_is = np.clip(e_is - 1, 0, self.E_bins.size - 1)
+        e_ie = np.clip(e_ie, 0, self.E_bins.size - 1)
+
+        my_dnu = np.copy(self.dnu[e_is: e_ie])
+        # clip edge bins if the requested range is smaller
+        my_dnu[0] -= e_min - self.E_bins[e_is]
+        my_dnu[-1] -= self.E_bins[e_ie] - e_max
+
+        interp_data = (data[..., e_is:e_ie] * my_dnu).sum(axis=-1)
+        return BilinearFieldInterpolator(np.log10(interp_data),
+                                         [self.log_nH[0], self.log_nH[-1],
+                                          self.log_T[0],  self.log_T[-1]],
+                                         ["log_nH", "log_T"], truncate=True)
+
+def add_xray_emissivity_field(e_min, e_max, filename=None,
+                              with_metals=True,
+                              constant_metallicity=None):
+    r"""Create an X-ray emissivity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Emissivity_{e_min}_{e_max}keV".
+    The units of the field are erg s^-1 cm^-3.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_emissivity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> p.save()
+
+    """
+
+    my_si = EmissivityIntegrator(filename=filename)
+
+    em_0 = my_si._get_interpolator(my_si.emissivity_primordial, e_min, e_max)
+    em_Z = None
+    if with_metals or constant_metallicity is not None:
+        em_Z = my_si._get_interpolator(my_si.emissivity_metals, e_min, e_max)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
+              "log_T"   : np.log10(data["Temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if em_Z is not None:
+            if with_metals:
+                my_Z = data["Metallicity"]
+            elif constant_metallicity is not None:
+                my_Z = constant_metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["H_NumberDensity"]**2 * my_emissivity
+
+    field_name = "Xray_Emissivity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_emissivity_field,
+              projection_conversion="cm",
+              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{erg}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+              projected_units=r"\rm{erg}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+    return field_name
+
+def add_xray_luminosity_field(e_min, e_max, filename=None,
+                              with_metals=True,
+                              constant_metallicity=None):
+    r"""Create an X-ray luminosity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Luminosity_{e_min}_{e_max}keV".
+    The units of the field are erg s^-1.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_luminosity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> sp = pf.h.sphere('max', (2., 'mpc'))
+    >>> print sp.quantities['TotalQuantity']('Xray_Luminosity_0.5_2keV')
+    
+    """
+
+    em_field = add_xray_emissivity_field(e_min, e_max, filename=filename,
+                                         with_metals=with_metals,
+                                         constant_metallicity=constant_metallicity)
+
+    def _luminosity_field(field, data):
+        return data[em_field] * data["CellVolume"]
+    field_name = "Xray_Luminosity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_luminosity_field,
+              display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{erg}\ \rm{s}^{-1}")
+    return field_name
+
+def add_xray_photon_emissivity_field(e_min, e_max, filename=None,
+                                     with_metals=True,
+                                     constant_metallicity=None):
+    r"""Create an X-ray photon emissivity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Photon_Emissivity_{e_min}_{e_max}keV".
+    The units of the field are photons s^-1 cm^-3.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_emissivity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> p.save()
+
+    """
+
+    my_si = EmissivityIntegrator(filename=filename)
+    energy_erg = np.power(10, my_si.log_E) * 1.60217646e-9
+
+    em_0 = my_si._get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
+                                   e_min, e_max)
+    em_Z = None
+    if with_metals or constant_metallicity is not None:
+        em_Z = my_si._get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
+                                       e_min, e_max)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
+              "log_T"   : np.log10(data["Temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if em_Z is not None:
+            if with_metals:
+                my_Z = data["Metallicity"]
+            elif constant_metallicity is not None:
+                my_Z = constant_metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["H_NumberDensity"]**2 * my_emissivity
+
+    field_name = "Xray_Photon_Emissivity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_emissivity_field,
+              projection_conversion="cm",
+              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{photons}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+              projected_units=r"\rm{photons}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+    return field_name

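One worked number behind EmissivityIntegrator: the table's energy bins
are stored in keV, and since E = h * nu gives roughly 2.41799e17 Hz per
keV, the line dnu = 2.41799e17 * np.diff(self.E_bins) converts bin
widths to frequency widths.  For example:

    # Frequency width of a 0.5-2.0 keV band:
    dnu = 2.41799e17 * (2.0 - 0.5)   # ~3.6e17 Hz
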
diff -r f0ccebb68031035e4222a351850072a19bf6fb09 -r e82d961491336f9c079fd974af9ed5aa461d1f37 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -506,11 +506,11 @@
 def _CellVolume(field, data):
     if data['dx'].size == 1:
         try:
-            return data['dx']*data['dy']*data['dx']*\
+            return data['dx'] * data['dy'] * data['dz'] * \
                 np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
-            return data['dx']*data['dy']*data['dx']
-    return data["dx"]*data["dy"]*data["dz"]
+            return data['dx'] * data['dy'] * data['dz']
+    return data["dx"] * data["dy"] * data["dz"]
 def _ConvertCellVolumeMpc(data):
     return data.convert("mpc")**3.0
 def _ConvertCellVolumeCGS(data):


https://bitbucket.org/yt_analysis/yt/commits/f3bb618f5a7a/
changeset:   f3bb618f5a7a
branch:      yt
user:        sskory
date:        2012-12-18 17:01:26
summary:     Merge.
affected #:  13 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/f2a4af62d852/
changeset:   f2a4af62d852
branch:      yt
user:        sskory
date:        2012-12-21 17:03:29
summary:     Merge.
affected #:  8 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/e239cc876094/
changeset:   e239cc876094
branch:      yt
user:        sskory
date:        2012-12-27 22:58:52
summary:     Merge
affected #:  7 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/748a991b04f9/
changeset:   748a991b04f9
branch:      yt
user:        sskory
date:        2013-01-06 15:38:44
summary:     Merge.
affected #:  13 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/dd49fb38eda6/
changeset:   dd49fb38eda6
branch:      yt
user:        sskory
date:        2013-01-11 20:41:46
summary:     Merge.
affected #:  6 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/0622634b51bc/
changeset:   0622634b51bc
branch:      yt
user:        sskory
date:        2013-01-11 20:51:05
summary:     Some missing attributes for LoadedHalos.
affected #:  1 file
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/a602faab7738/
changeset:   a602faab7738
branch:      yt
user:        sskory
date:        2013-01-23 21:40:56
summary:     Merge.
affected #:  15 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/54243f643880/
changeset:   54243f643880
branch:      yt
user:        sskory
date:        2013-01-29 22:32:41
summary:     Merge.
affected #:  8 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/086a45e3a01a/
changeset:   086a45e3a01a
branch:      yt
user:        sskory
date:        2013-02-08 21:53:22
summary:     Fixing the center of mass calculation for halos.
affected #:  1 file
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/4b3e48b5d3cf/
changeset:   4b3e48b5d3cf
branch:      yt
user:        sskory
date:        2013-02-08 21:53:38
summary:     Merge.
affected #:  41 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/160e80992381/
changeset:   160e80992381
branch:      yt
user:        sskory
date:        2013-02-08 22:41:36
summary:     Changes suggested by Britton.
affected #:  1 file
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/6c75bbac3629/
changeset:   6c75bbac3629
branch:      yt
user:        sskory
date:        2013-02-08 23:38:52
summary:     Making the periodicity stuff for center_of_mass clearer.
affected #:  2 files
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/bc7f37541e66/
changeset:   bc7f37541e66
branch:      yt
user:        sskory
date:        2013-02-08 23:41:55
summary:     Accidentally commited some work in progress stuff. Removing.
affected #:  1 file
Diff not available.

https://bitbucket.org/yt_analysis/yt/commits/d5e52722bcf5/
changeset:   d5e52722bcf5
branch:      yt
user:        MatthewTurk
date:        2013-02-12 19:15:16
summary:     Merged in sskory/yt (pull request #420)

Halo center of mass fix
affected #:  3 files
Diff not available.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because you have the commit notification service enabled and are
the addressed recipient of this email.


