[yt-svn] commit/yt: 3 new changesets

Bitbucket commits-noreply at bitbucket.org
Wed Jan 18 10:28:48 PST 2012


3 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/8138c1d5540d/
changeset:   8138c1d5540d
branch:      yt
user:        jzuhone
date:        2012-01-18 07:28:39
summary:     KnownFLASHFields was not getting filled with anything, because we were adding fields only to FLASHFieldInfo. Since KnownFLASHFields was empty, none of the field definitions here stuck when the parameter file was actually opened, and the definitions were simply overridden. This change fixes that behavior so that the common fields listed here get the right units.

The "grac" field was also removed, since it is a temporary field generated during the simulation and is not used for analysis.
affected #:  1 file
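
A minimal sketch of the registration issue described above, using a toy
stand-in for yt's FieldInfoContainer (the class body and names below are
illustrative assumptions, not yt's actual implementation):

    # Toy stand-in: yt's real FieldInfoContainer does more than this.
    class FieldInfoContainer(dict):
        def add_field(self, name, **kwargs):
            self[name] = kwargs

    FLASHFieldInfo = FieldInfoContainer()
    KnownFLASHFields = FieldInfoContainer()

    # Before this commit: fields were registered only on FLASHFieldInfo.
    add_field = FLASHFieldInfo.add_field
    add_field("dens", units=r"\rm{g}/\rm{cm}^3")

    # KnownFLASHFields stays empty, so nothing defined here survives when
    # the parameter file is opened and the fields are re-registered.
    assert "dens" not in KnownFLASHFields

    # After this commit: common fields go through KnownFLASHFields.add_field,
    # so the unit definitions stick.
    add_flash_field = KnownFLASHFields.add_field
    add_flash_field("dens", units=r"\rm{g}/\rm{cm}^3")
    assert KnownFLASHFields["dens"]["units"] == r"\rm{g}/\rm{cm}^3"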

diff -r 74e0a0d08c10028a5e1a95864cbfd0459250dc1b -r 8138c1d5540dac5d125c85d1ff477f6fa09831f7 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -33,7 +33,6 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -110,36 +109,78 @@
         return data.convert(fname)
     return _conv
 
-add_field("dens", function=lambda a,b: None, take_log=True,
-          convert_function=_get_convert("dens"),
-          units=r"\rm{g}/\rm{cm}^3")
-add_field("xvel", function=lambda a,b: None, take_log=False,
-          convert_function=_get_convert("xvel"),
-          units=r"\rm{cm}/\rm{s}")
-add_field("yvel", function=lambda a,b: None, take_log=False,
-          convert_function=_get_convert("yvel"),
-          units=r"\rm{cm}/\rm{s}")
-add_field("zvel", function=lambda a,b: None, take_log=False,
-          convert_function=_get_convert("zvel"),
-          units=r"\rm{cm}/\rm{s}")
-add_field("particle_xvel", function=lambda a,b: None, take_log=False,
-          convert_function=_get_convert("particle_xvel"),
-          units=r"\rm{cm}/\rm{s}")
-add_field("particle_yvel", function=lambda a,b: None, take_log=False,
-          convert_function=_get_convert("particle_yvel"),
-          units=r"\rm{cm}/\rm{s}")
-add_field("particle_zvel", function=lambda a,b: None, take_log=False,
-          convert_function=_get_convert("particle_zvel"),
-          units=r"\rm{cm}/\rm{s}")
-add_field("temp", function=lambda a,b: None, take_log=True,
-          convert_function=_get_convert("temp"),
-          units=r"\rm{K}")
-add_field("pres", function=lambda a,b: None, take_log=True,
-          convert_function=_get_convert("pres"),
-          units=r"\rm{unknown}")
+add_flash_field("dens", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")
+add_flash_field("velx", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("velx"),
+                units=r"\rm{cm}/\rm{s}")
+add_flash_field("vely", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("vely"),
+                units=r"\rm{cm}/\rm{s}")
+add_flash_field("velz", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("velz"),
+                units=r"\rm{cm}/\rm{s}")
+add_flash_field("particle_posx", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("particle_posx"),
+                units=r"\rm{cm}")
+add_flash_field("particle_posy", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("particle_posy"),
+                units=r"\rm{cm}")
+add_flash_field("particle_posz", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("particle_posz"),
+                units=r"\rm{cm}")
+add_flash_field("particle_velx", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("particle_velx"),
+                units=r"\rm{cm}/\rm{s}")
+add_flash_field("particle_vely", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("particle_vely"),
+                units=r"\rm{cm}/\rm{s}")
+add_flash_field("particle_velz", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("particle_velz"),
+                units=r"\rm{cm}/\rm{s}")
+add_flash_field("particle_mass", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("particle_mass"),
+                units=r"\rm{g}")
+add_flash_field("temp", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("temp"),
+                units=r"\rm{K}")
+add_flash_field("pres", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("pres"),
+                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+add_flash_field("pden", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("pden"),
+                units=r"\rm{g}/\rm{cm}^3")
+add_flash_field("magx", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("magx"),
+                units = r"\mathrm{Gau\ss}")
+add_flash_field("magy", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("magy"),
+                units = r"\mathrm{Gau\ss}")
+add_flash_field("magz", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("magz"),
+                units = r"\mathrm{Gau\ss}")
+add_flash_field("magp", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("magp"),
+                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+add_flash_field("divb", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("divb"),
+                units = r"\mathrm{Gau\ss}\/\rm{cm}")
+add_flash_field("game", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("game"),
+                units=r"\rm{ratio\/of\/specific\/heats}")
+add_flash_field("gamc", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("gamc"),
+                units=r"\rm{ratio\/of\/specific\/heats}")
+add_flash_field("gpot", function=lambda a,b: None, take_log=True,
+                convert_function=_get_convert("gpot"),
+                units=r"\rm{ergs\//\/g}")
+add_flash_field("gpol", function=lambda a,b: None, take_log=False,
+                convert_function=_get_convert("gpol"),
+                units = r"\rm{ergs\//\/g}")
 
 for f,v in translation_dict.items():
-    if v not in FLASHFieldInfo:
+    if v not in KnownFLASHFields:
         pfield = v.startswith("particle")
         add_field(v, function=lambda a,b: None, take_log=False,
                   validators = [ValidateDataField(v)],
@@ -147,54 +188,6 @@
     #print "Setting up translator from %s to %s" % (v, f)
     _generate_translation(v, f)
 
-add_field("gamc", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("gamc")],
-          units = r"\rm{ratio\/of\/specific\/heats}")
-
-add_field("game", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("game")],
-          units = r"\rm{ratio\/of\/specific\/heats}")
-
-add_field("gpot", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("gpot")],
-          units = r"\rm{ergs\//\/g}")
-
-add_field("gpol", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("gpol")],
-          units = r"\rm{ergs\//\/g}")
-
-add_field("grac", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("grac")],
-          units = r"\rm{cm\/s^{-2}}")
-
-add_field("pden", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("pden")],
-          units = r"\rm{g}\//\/\rm{cm}^{3}")
-
-add_field("pres", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("pres")],
-          units = r"\rm{erg}\//\/\rm{cm}^{3}")
-
-add_field("magx", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("magx")],
-          units = r"\rm{G}")
-
-add_field("magy", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("magy")],
-          units = r"\rm{G}")
-
-add_field("magz", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("magz")],
-          units = r"\rm{G}")
-
-add_field("magp", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("magp")],
-          units = r"\rm{erg}\//\/\rm{cm}^{3}")
-
-add_field("divb", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("divb")],
-          units = r"\rm{G}\/\rm{cm}")
-
 def _convertParticleMassMsun(data):
     return 1.0/1.989e33
 def _ParticleMassMsun(field, data):



https://bitbucket.org/yt_analysis/yt/changeset/9689723b537d/
changeset:   9689723b537d
branch:      yt
user:        MatthewTurk
date:        2012-01-18 19:28:09
summary:     Adding a call that will initialize the FLASH conversion factor defaultdict for
every field found in the file.
affected #:  1 file
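
The one-line change below leans on defaultdict semantics: simply indexing a
missing key inserts the default value. A standalone sketch (the default
factor of 1.0 is an assumption for illustration, not necessarily what the
FLASH frontend uses):

    from collections import defaultdict

    # Stand-in for pf.conversion_factors; reading a missing key creates it.
    conversion_factors = defaultdict(lambda: 1.0)
    field_list = ["dens", "velx", "temp"]

    # Evaluated purely for the side effect: each lookup materializes an
    # entry for that field, mirroring the list comprehension in the diff.
    [conversion_factors[field] for field in field_list]

    assert set(conversion_factors) == set(field_list)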

diff -r 8138c1d5540dac5d125c85d1ff477f6fa09831f7 -r 9689723b537de098f146880d05e18724420e684f yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -167,6 +167,8 @@
                 continue
             available = na.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
+        [self.parameter_file.conversion_factors[field] 
+         for field in self.field_list]
         for field in self.field_list:
             if field not in self.derived_field_list:
                 self.derived_field_list.append(field)



https://bitbucket.org/yt_analysis/yt/changeset/4fe82421186a/
changeset:   4fe82421186a
branch:      yt
user:        MatthewTurk
date:        2012-01-18 19:28:39
summary:     Merging
affected #:  21 files

diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -69,8 +69,7 @@
 from .halo_profiler.api import \
     VirialFilter, \
     HaloProfiler, \
-    FakeProfile, \
-    shift_projections
+    FakeProfile
 
 from .hierarchy_subset.api import \
     ConstructedRootGrid, \


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -227,7 +227,7 @@
         self._point_indices = {}
         self._vc_data = {}
         for key, val in kwargs.items():
-            mylog.info("Setting %s to %s", key, val)
+            mylog.debug("Setting %s to %s", key, val)
             self.set_field_parameter(key, val)
 
     def __set_default_field_parameters(self):
@@ -397,9 +397,10 @@
                      [self.field_parameters])
         return (_reconstruct_object, args)
 
-    def __repr__(self):
+    def __repr__(self, clean = False):
         # We'll do this the slow way to be clear what's going on
-        s = "%s (%s): " % (self.__class__.__name__, self.pf)
+        if clean: s = "%s: " % (self.__class__.__name__)
+        else: s = "%s (%s): " % (self.__class__.__name__, self.pf)
         s += ", ".join(["%s=%s" % (i, getattr(self,i))
                        for i in self._con_args])
         return s
@@ -2578,14 +2579,8 @@
         verts = []
         samples = []
         for i, g in enumerate(self._get_grid_objs()):
-            mask = self._get_cut_mask(g) * g.child_mask
-            vals = g.get_vertex_centered_data(field)
-            if sample_values is not None:
-                svals = g.get_vertex_centered_data(sample_values)
-            else:
-                svals = None
-            my_verts = march_cubes_grid(value, vals, mask, g.LeftEdge, g.dds,
-                                        svals)
+            my_verts = self._extract_isocontours_from_grid(
+                            g, field, value, sample_values)
             if sample_values is not None:
                 my_verts, svals = my_verts
                 samples.append(svals)
@@ -2612,6 +2607,20 @@
             return verts, samples
         return verts
 
+
+    @restore_grid_state
+    def _extract_isocontours_from_grid(self, grid, field, value,
+                                       sample_values = None):
+        mask = self._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(field)
+        if sample_values is not None:
+            svals = grid.get_vertex_centered_data(sample_values)
+        else:
+            svals = None
+        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
+                                    grid.dds, svals)
+        return my_verts
+
     def calculate_isocontour_flux(self, field, value,
                     field_x, field_y, field_z, fluxing_field = None):
         r"""This identifies isocontours on a cell-by-cell basis, with no
@@ -2678,19 +2687,25 @@
         """
         flux = 0.0
         for g in self._get_grid_objs():
-            mask = self._get_cut_mask(g) * g.child_mask
-            vals = g.get_vertex_centered_data(field)
-            if fluxing_field is None:
-                ff = na.ones(vals.shape, dtype="float64")
-            else:
-                ff = g.get_vertex_centered_data(fluxing_field)
-            xv, yv, zv = [g.get_vertex_centered_data(f) for f in 
-                         [field_x, field_y, field_z]]
-            flux += march_cubes_grid_flux(value, vals, xv, yv, zv,
-                        ff, mask, g.LeftEdge, g.dds)
+            flux += self._calculate_flux_in_grid(g, field, value,
+                    field_x, field_y, field_z, fluxing_field)
         flux = self.comm.mpi_allreduce(flux, op="sum")
         return flux
 
+    @restore_grid_state
+    def _calculate_flux_in_grid(self, grid, field, value,
+                    field_x, field_y, field_z, fluxing_field = None):
+        mask = self._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(field)
+        if fluxing_field is None:
+            ff = na.ones(vals.shape, dtype="float64")
+        else:
+            ff = grid.get_vertex_centered_data(fluxing_field)
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+                     [field_x, field_y, field_z]]
+        return march_cubes_grid_flux(value, vals, xv, yv, zv,
+                    ff, mask, grid.LeftEdge, grid.dds)
+
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
                                 log_space=True, cumulative=True, cache=False):
         """
@@ -2990,12 +3005,6 @@
                  & (r <= self._radius))
         return cm
 
-    def volume(self, unit="unitary"):
-        """
-        Return the volume of the cylinder in units of *unit*.
-        """
-        return math.pi * (self._radius)**2. * self._height * pf[unit]**3
-
 class AMRInclinedBox(AMR3DData):
     _type_name="inclined_box"
     _con_args = ('origin','box_vectors')
@@ -3342,11 +3351,13 @@
            na.any(self.right_edge + buffer > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
-                            self.right_edge + buffer, self.level)
+                            self.right_edge + buffer, self.level,
+                            min(self.level, self.pf.min_level))
         else:
             grids,ind = self.pf.hierarchy.get_box_grids_below_level(
                 self.left_edge - buffer,
-                self.right_edge + buffer, self.level)
+                self.right_edge + buffer, self.level,
+                min(self.level, self.pf.min_level))
         sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
         self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
@@ -3481,11 +3492,39 @@
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
-                 / self.pf.domain_dimensions).max()
-        AMRCoveringGridBase._get_list_of_grids(self, buffer)
-        # We reverse the order to ensure that coarse grids are first
-        self._grids = self._grids[::-1]
+        # Check for ill-behaved AMR schemes (Enzo) where we may have
+        # root-tile-boundary issues.  This is specific to the root tiles not
+        # allowing grids to cross them and also allowing > 1 level of
+        # difference between neighboring areas.
+        nz = 0
+        buf = 0.0
+        d1 = ((self.global_startindex.astype("float64") - 1)
+           / (self.pf.refine_by**self.level))
+        if na.any(d1 == na.rint(d1)):
+            nz = 2 * self.pf.refine_by**self.level
+            buf = self._base_dx
+        cg = self.pf.h.covering_grid(self.level,
+            self.left_edge - buf, self.ActiveDimensions + nz)
+        cg._use_pbar = False
+        count = cg.ActiveDimensions.prod()
+        for g in cg._grids:
+            count -= cg._get_data_from_grid(g, [])
+            if count <= 0:
+                min_level = g.Level
+                break
+        # This should not cost substantial additional time.
+        BLE = self.left_edge - buf
+        BRE = self.right_edge + buf
+        if na.any(BLE < self.pf.domain_left_edge) or \
+           na.any(BRE > self.pf.domain_right_edge):
+            grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
+                            BLE, BRE, self.level, min_level)
+        else:
+            grids,ind = self.pf.hierarchy.get_box_grids_below_level(
+                BLE, BRE, self.level,
+                min(self.level, min_level))
+        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)]
 
     def get_data(self, field=None):
         self._get_list_of_grids()
@@ -3509,9 +3548,10 @@
         for gi, grid in enumerate(self._grids):
             if self._use_pbar: pbar.update(gi)
             if grid.Level > last_level and grid.Level <= self.level:
-                self._update_level_state(last_level + 1)
-                self._refine(1, fields_to_get)
-                last_level = grid.Level
+                while grid.Level > last_level:
+                    self._update_level_state(last_level + 1)
+                    self._refine(1, fields_to_get)
+                    last_level += 1
             self._get_data_from_grid(grid, fields_to_get)
         if self.level > 0:
             for field in fields_to_get:
@@ -3658,6 +3698,19 @@
                     self._some_overlap.append(grid)
                     continue
     
+    def __repr__(self):
+        # We'll do this the slow way to be clear what's going on
+        s = "%s (%s): " % (self.__class__.__name__, self.pf)
+        s += "["
+        for i, region in enumerate(self.regions):
+            if region in ["OR", "AND", "NOT", "(", ")"]:
+                s += region
+            else:
+                s += region.__repr__(clean = True)
+            if i < (len(self.regions) - 1): s += ", "
+        s += "]"
+        return s
+    
     def _is_fully_enclosed(self, grid):
         return (grid in self._all_overlap)
 


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -134,6 +134,9 @@
                 # This is only going to be raised if n_gz > 0
                 n_gz = ngt_exception.ghost_zones
                 f_gz = ngt_exception.fields
+                if f_gz is None:
+                    f_gz = self.pf.field_info[field].get_dependencies(
+                            pf = self.pf).requested
                 gz_grid = self.retrieve_ghost_zones(n_gz, f_gz, smoothed=True)
                 temp_array = self.pf.field_info[field](gz_grid)
                 sl = [slice(n_gz, -n_gz)] * 3


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -205,17 +205,19 @@
                     mask[gi] = True
         return self.grids[mask], na.where(mask)
 
-    def get_box_grids_below_level(self, left_edge, right_edge, level):
+    def get_box_grids_below_level(self, left_edge, right_edge, level,
+                                  min_level = 0):
         # We discard grids if they are ABOVE the level
         mask = na.empty(self.grids.size, dtype='int32')
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
-                            self.grid_levels, mask)
+                            self.grid_levels, mask, min_level)
         mask = mask.astype("bool")
         return self.grids[mask], na.where(mask)
 
-    def get_periodic_box_grids_below_level(self, left_edge, right_edge, level):
+    def get_periodic_box_grids_below_level(self, left_edge, right_edge, level,
+                                           min_level = 0):
         mask = na.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
@@ -232,7 +234,8 @@
                 for off_z in [-1, 0, 1]:
                     nle[2] = (dw[2]*off_z + dl[2]) + left_dist[2]
                     nre = nle + db
-                    g, gi = self.get_box_grids_below_level(nle, nre, level)
+                    g, gi = self.get_box_grids_below_level(nle, nre,
+                                            level, min_level)
                     mask[gi] = True
         return self.grids[mask], na.where(mask)
 


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -87,6 +87,8 @@
         # to get the timing right, do this before the heavy lifting
         self._instantiated = time.time()
 
+        self.min_level = 0
+
         self._parse_parameter_file()
         self._set_units()
 


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -882,6 +882,8 @@
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
 add_field("VorticitySquared", function=_VorticitySquared,
-          validators=[ValidateSpatial(1)],
+          validators=[ValidateSpatial(1,
+              ["x-velocity","y-velocity","z-velocity"])],
           units=r"\rm{s}^{-2}",
           convert_function=_convertVorticitySquared)
+


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -288,6 +288,8 @@
             [self._find_parameter("real", "%smin" % ax) for ax in 'xyz'])
         self.domain_right_edge = na.array(
             [self._find_parameter("real", "%smax" % ax) for ax in 'xyz'])
+        self.min_level = self._find_parameter(
+            "integer", "lrefine_min", scalar = False) - 1
 
         # Determine domain dimensions
         try:


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -1,13 +1,14 @@
 """
-API for yt.frontends.chombo
+API for yt.frontends.gdf
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
 Author: J.S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <brittonsmith at gmail.com>
 Affiliation: MSU
-Homepage: http://yt.Chombotools.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -1,12 +1,15 @@
 """
-Data structures for Chombo.
+Data structures for GDF.
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2008-2011 Matthew Turk, J. S. Oishi.  All Rights Reserved.
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
 
   This file is part of yt.
 
@@ -76,7 +79,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self._fhandle = h5py.File(self.hierarchy_filename)
+        self._fhandle = h5py.File(self.hierarchy_filename,'r')
         AMRHierarchy.__init__(self,pf,data_style)
 
         self._fhandle.close()
@@ -94,31 +97,31 @@
 
     def _count_grids(self):
         self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
-        
+       
     def _parse_hierarchy(self):
         f = self._fhandle 
-        
-        # this relies on the first Group in the H5 file being
-        # 'Chombo_global'
-        levels = f.listnames()[1:]
         dxs=[]
         self.grids = na.empty(self.num_grids, dtype='object')
-        for i, grid in enumerate(f['data'].keys()):
-            self.grids[i] = self.grid(i, self, f['grid_level'][i],
-                                      f['grid_left_index'][i],
-                                      f['grid_dimensions'][i])
-            self.grids[i]._level_id = f['grid_level'][i]
+        levels = (f['grid_level'][:]).copy()
+        glis = (f['grid_left_index'][:]).copy()
+        gdims = (f['grid_dimensions'][:]).copy()
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+            self.grids[i]._level_id = levels[i]
 
             dx = (self.parameter_file.domain_right_edge-
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
-            dx = dx/self.parameter_file.refine_by**(f['grid_level'][i])
+            dx = dx/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
         dx = na.array(dxs)
-        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
-        self.grid_dimensions = f['grid_dimensions'][:].astype("int32")
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
         self.grid_particle_count = f['grid_particle_count'][:]
-
+        del levels, glis, gdims
+ 
     def _populate_grid_objects(self):
         for g in self.grids:
             g._prepare_grid()
@@ -130,9 +133,6 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        pass
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -171,7 +171,11 @@
         # This should be improved.
         self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
-            self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+            try:
+                self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+            except:
+                self.units[field_name] = 1.0
+
         self._handle.close()
         del self._handle
         
@@ -181,7 +185,9 @@
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]
         self.domain_dimensions = sp["domain_dimensions"][:]
-        self.refine_by = sp["refine_by"]
+        refine_by = sp["refine_by"]
+        if refine_by is None: refine_by = 2
+        self.refine_by = refine_by 
         self.dimensionality = sp["dimensionality"]
         self.current_time = sp["current_time"]
         self.unique_identifier = sp["unique_identifier"]
@@ -198,6 +204,7 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
         self._handle.close()
         del self._handle


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -1,11 +1,14 @@
 """
 GDF-specific fields
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2009-2011 J. S. Oishi, Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
 
   This file is part of yt.
 
@@ -53,40 +56,31 @@
 add_gdf_field = KnownGDFFields.add_field
 
 add_gdf_field("density", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("density")],
           units=r"\rm{g}/\rm{cm}^3",
           projected_units =r"\rm{g}/\rm{cm}^2")
 
 add_gdf_field("specific_energy", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("specific_energy")],
           units=r"\rm{erg}/\rm{g}")
 
 add_gdf_field("pressure", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("pressure")],
           units=r"\rm{erg}/\rm{g}")
 
-add_gdf_field("velocity_x", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("velocity_x")],
+add_gdf_field("velocity_x", function=NullFunc, take_log=False,
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("velocity_y", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("velocity_y")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("velocity_z", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("velocity_z")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_x", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_x")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_y", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_y")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_z", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_z")],
           units=r"\rm{cm}/\rm{s}")
 
 for f,v in log_translation_dict.items():


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -1,6 +1,8 @@
 """
 The data-file handling functions
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
@@ -35,38 +37,33 @@
     def _field_dict(self,fhandle):
         keys = fhandle['field_types'].keys()
         val = fhandle['field_types'].keys()
-        # ncomp = int(fhandle['/'].attrs['num_components'])
-        # temp =  fhandle['/'].attrs.listitems()[-ncomp:]
-        # val, keys = zip(*temp)
-        # val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
         return dict(zip(keys,val))
         
     def _read_field_names(self,grid):
         fhandle = h5py.File(grid.filename,'r')
-        return fhandle['field_types'].keys()
+        names = fhandle['field_types'].keys()
+        fhandle.close()
+        return names
     
     def _read_data_set(self,grid,field):
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
-        return fhandle['/data/grid_%010i/'%grid.id+field][:]
-        # field_dict = self._field_dict(fhandle)
-        # lstring = 'level_%i' % grid.Level
-        # lev = fhandle[lstring]
-        # dims = grid.ActiveDimensions
-        # boxsize = dims.prod()
-        
-        # grid_offset = lev[self._offset_string][grid._level_id]
-        # start = grid_offset+field_dict[field]*boxsize
-        # stop = start + boxsize
-        # data = lev[self._data_string][start:stop]
-
-        # return data.reshape(dims, order='F')
-                                          
+        data = (fhandle['/data/grid_%010i/'%grid.id+field][:]).copy()
+        fhandle.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
+        if grid.pf.field_ordering == 1:
+            sl.reverse()
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
-        return fhandle['/data/grid_%010i/'%grid.id+field][:][sl]
+        data = (fhandle['/data/grid_%010i/'%grid.id+field][:][sl]).copy()
+        fhandle.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
 
-    # return self._read_data_set(grid,field)[sl]
-


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -471,9 +471,30 @@
     import pkg_resources
     yt_provider = pkg_resources.get_provider("yt")
     path = os.path.dirname(yt_provider.module_path)
-    version = _get_hg_version(path)[:12]
+    version = get_hg_version(path)[:12]
     return version
 
+def get_version_stack():
+    import numpy, matplotlib, h5py
+    version_info = {}
+    version_info['yt'] = get_yt_version()
+    version_info['numpy'] = numpy.version.version
+    version_info['matplotlib'] = matplotlib.__version__
+    version_info['h5py'] = h5py.version.version
+    return version_info
+
+def get_script_contents():
+    stack = inspect.stack()
+    top_frame = inspect.stack()[-1]
+    finfo = inspect.getframeinfo(top_frame[0])
+    if finfo[2] != "<module>": return None
+    if not os.path.exists(finfo[0]): return None
+    try:
+        contents = open(finfo[0]).read()
+    except:
+        contents = None
+    return contents
+
 # This code snippet is modified from Georg Brandl
 def bb_apicall(endpoint, data, use_pass = True):
     import urllib, urllib2


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -119,7 +119,7 @@
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
-    HomogenizedVolume, Camera, off_axis_projection
+    HomogenizedVolume, Camera, off_axis_projection, MosaicFisheyeCamera
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/pmods.py
--- /dev/null
+++ b/yt/pmods.py
@@ -0,0 +1,359 @@
+#####
+#
+# This code is included in yt, but was not originally written by yt.  For the
+# original source, please see this discussion:
+#
+#   https://groups.google.com/forum/#!topic/mpi4py/DYNXufzfKPA
+#
+#   and the repository:
+#
+#   https://github.com/langton/MPI_Import
+#
+#
+# Inclusion with yt does not imply any change in license of the original code.
+# Our local modifications, to fix recursive imports and to use mpi4py, are
+# released under the original code license.
+#
+#####
+
+
+# This code is derived from knee.py, which was included in the Python
+# 2.6 distribution.
+#
+# The modifications to this code are copyright (c) 2011, Lawrence
+# Livermore National Security, LLC. Produced at the Lawrence Livermore
+# National Laboratory. Written by Tim Kadich and Asher Langton
+# <langton2 at llnl.gov>. Released as LLNL-CODE-522751 under the name
+# SmartImport.py, version 1.0. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the disclaimer below.
+#
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the disclaimer (as noted below)
+#   in the documentation and/or other materials provided with the
+#   distribution.
+#
+# - Neither the name of the LLNS/LLNL nor the names of its contributors
+#   may be used to endorse or promote products derived from this
+#   software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
+# LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Additional BSD Notice
+#
+# 1. This notice is required to be provided under our contract with the
+# U.S. Department of Energy (DOE). This work was produced at Lawrence
+# Livermore National Laboratory under Contract No. DE-AC52-07NA27344
+# with the DOE.
+#
+# 2. Neither the United States Government nor Lawrence Livermore
+# National Security, LLC nor any of their employees, makes any warranty,
+# express or implied, or assumes any liability or responsibility for the
+# accuracy, completeness, or usefulness of any information, apparatus,
+# product, or process disclosed, or represents that its use would not
+# infringe privately-owned rights.
+#
+# 3. Also, reference herein to any specific commercial products,
+# process, or services by trade name, trademark, manufacturer or
+# otherwise does not necessarily constitute or imply its endorsement,
+# recommendation, or favoring by the United States Government or
+# Lawrence Livermore National Security, LLC. The views and opinions of
+# authors expressed herein do not necessarily state or reflect those of
+# the United States Government or Lawrence Livermore National Security,
+# LLC, and shall not be used for advertising or product endorsement
+# purposes.
+
+"""MPI_Import defines an mpi-aware import hook. The standard use of
+this module is as follows:
+
+   from MPI_Import import mpi_import
+   with mpi_import():
+      import foo
+      import bar
+
+Within the with block, the standard import statement is replaced by an
+MPI-aware import statement. The rank 0 process finds the location of
+each module to import, broadcasts the location, then all of the
+processes load that module.
+
+One CRITICAL detail: any code inside the mpi_import block must be
+executed exactly the same on all of the MPI ranks. For example,
+consider this:
+
+def foo():
+   import mpi
+   if mpi.rank == 0:
+      bar = someFunction()
+   bar = mpi.bcast(bar,root=0)
+
+def someFunction():
+   import os
+   return os.name
+
+If foo() is called during the import process, then things may go very
+wrong. If the os module hasn't been loaded, then the rank 0 process
+will find os and broadcast its location. Since there's no
+corresponding bcast for rank > 0, the other processes will receive
+that broadcast instead of the broadcast for bar, resulting in
+undefined behavior. Similarly, if a rank > 0 process encounters an import
+that rank 0 does not encounter, that process will either hang waiting
+for the bcast, or it will receive an out-of-order bcast.
+
+The import hook provides a way to test whether we're using this
+importer, which can be used to disable rank-asymmetric behavior in a
+module import:
+
+import __builtin__
+hasattr(__builtin__.__import__,"mpi_import")
+
+This evaluates to True only when we're in an mpi_import() context
+manager.
+
+There are some situations where rank-dependent code may be necessary.
+One such example is pyMPI's synchronizeQueuedOutput function, which
+tends to cause deadlocks when it is executed inside an mpi_imported
+module. In that case, we provide a hook to execute a function after
+the mpi_import hook has been replaced by the standard import hook.
+Here is an example showing the use of this feature:
+
+# encapsulate the rank-asymmetric code in a function
+def f():
+    if mpi.rank == 0:
+        doOneThing()
+    else:
+        doSomethingElse()
+
+# Either importer is None (standard import) or it's a reference to
+# the mpi_import object that owns the current importer.
+import __builtin__
+importer = getattr(__builtin__.__import__,"mpi_import",None)
+if importer:
+    importer.callAfterImport(f)
+else:
+    # If we're using the standard import, then we'll execute the
+    # code in f immediately
+    f()
+
+WARNING: the callAfterImport feature is not intended for casual use.
+Usually it will be sufficient (and preferable) to either remove the
+rank-asymmetric code or explicitly move it outside of the 'with
+mpi_import' block. callAfterImport is provided for the (hopefully
+rare!) cases where this does not suffice.
+
+
+Some implementation details:
+
+-This code is based on knee.py, which is an example of a pure Python
+ hierarchical import that was included with Python 2.6 distributions.
+
+-Python PEP 302 defines another way to override import by using finder
+ and loader objects, which behave similarly to the imp.find_module and
+ imp.load_module functions in __import_module__ below. Unfortunately,
+ the implementation of PEP 302 is such that the path for the module
+ has already been found by the time that the "finder" object is
+ constructed, so it's not suitable for our purposes.
+
+-This module uses pyMPI. It was originally designed with mpi4py, and
+ switching back to mpi4py requires only minor modifications. To
+ quickly substitute mpi4py for pyMPI, the 'import mpi' line below can
+ be replaced with the following wrapper:
+
+from mpi4py import MPI
+class mpi(object):
+    rank = MPI.COMM_WORLD.Get_rank()
+    @staticmethod
+    def bcast(obj=None,root=0):
+        return MPI.COMM_WORLD.bcast(obj,root)
+
+-An alternate version of this module had rank 0 perform all of the
+ lookups, and then broadcast the locations all-at-once when that
+ process reached the end of the context manager. This was somewhat
+ faster than the current implementation, but was prone to deadlock
+ when loading modules containing MPI synchronization points.
+
+-The 'level' parameter to the import hook is not handled correctly; we
+ treat it as if it were -1 (try relative and absolute imports). For
+ more information about the level parameter, run 'help(__import__)'.
+"""
+
+import sys, imp, __builtin__,types
+from mpi4py import MPI
+class mpi(object):
+    rank = MPI.COMM_WORLD.Get_rank()
+    @staticmethod
+    def bcast(obj=None,root=0):
+        return MPI.COMM_WORLD.bcast(obj,root)
+
+class mpi_import(object):
+    def __enter__(self):
+        imp.acquire_lock()
+        __import_hook__.mpi_import = self
+        self.__funcs = []
+        self.original_import = __builtin__.__import__
+        __builtin__.__import__ = __import_hook__
+
+    def __exit__(self,type,value,traceback):
+        __builtin__.__import__ = self.original_import
+        __import_hook__.mpi_import = None
+        imp.release_lock()
+        for f in self.__funcs:
+            f()
+
+    def callAfterImport(self,f):
+        "Add f to the list of functions to call on exit"
+        if type(f) != types.FunctionType:
+            raise TypeError("Argument must be a function!")
+        self.__funcs.append(f)
+
+
+# The remaining code is for internal use only. Do not explicitly call
+# any of the following functions.
+
+# Replacement for __import__(). Taken from knee.py; unmodified except for the
+# (unused) level parameter.
+def __import_hook__(name, globals=None, locals=None, fromlist=None, level=-1):
+    # TODO: handle level parameter correctly. For now, we'll ignore
+    # it and try both absolute and relative imports.
+    parent = __determine_parent__(globals)
+    q, tail = __find_head_package__(parent, name)
+    m = __load_tail__(q, tail)
+    if not fromlist:
+        return q
+    if hasattr(m, "__path__"):
+        __ensure_fromlist__(m, fromlist)
+    return m
+
+# __import_module__ is the only part of knee.py with non-trivial changes.
+# The MPI rank 0 process handles the lookup and broadcasts the location to
+# the others. This must be called synchronously, at least in the case that
+# 'fqname' is not already in sys.modules.
+def __import_module__(partname, fqname, parent):
+    fqname = fqname.rstrip(".")
+    try:
+        return sys.modules[fqname]
+    except KeyError:
+        pass
+    fp = None         # module's file
+    pathname = None   # module's location
+    stuff = None      # tuple of (suffix,mode,type) for the module
+    ierror = False    # are we propagating an import error from rank 0?
+
+    # Start with the lookup on rank 0. The other processes will be waiting
+    # on a broadcast, so we need to send one even if we're bailing out due
+    # to an import error.
+    if mpi.rank == 0:
+        try:
+            fp, pathname, stuff = imp.find_module(partname,
+                                                  parent and parent.__path__)
+        except ImportError:
+            ierror = True
+            return None
+        finally:
+            pathname,stuff,ierror = mpi.bcast((pathname,stuff,ierror))
+    else:
+        pathname,stuff,ierror = mpi.bcast((pathname,stuff,ierror))
+        if ierror:
+            return None
+        # If imp.find_module returned an open file to rank 0, then we should
+        # open the corresponding file for this process too.
+        if stuff and stuff[1]:
+            fp = open(pathname,stuff[1])
+
+    try:
+        m = imp.load_module(fqname, fp, pathname, stuff)
+    finally:
+        if fp: fp.close()
+    if parent:
+        setattr(parent, partname, m)
+    return m
+
+
+# The remaining functions are taken unmodified (except for the names)
+# from knee.py.
+def __determine_parent__(globals):
+    if not globals or  not globals.has_key("__name__"):
+        return None
+    pname = globals['__name__']
+    if globals.has_key("__path__"):
+        parent = sys.modules[pname]
+        assert globals is parent.__dict__
+        return parent
+    if '.' in pname:
+        i = pname.rfind('.')
+        pname = pname[:i]
+        parent = sys.modules[pname]
+        assert parent.__name__ == pname
+        return parent
+    return None
+
+def __find_head_package__(parent, name):
+    if '.' in name:
+        i = name.find('.')
+        head = name[:i]
+        tail = name[i+1:]
+    else:
+        head = name
+        tail = ""
+    if parent:
+        qname = "%s.%s" % (parent.__name__, head)
+    else:
+        qname = head
+    q = __import_module__(head, qname, parent)
+    if q: return q, tail
+    if parent:
+        qname = head
+        parent = None
+        q = __import_module__(head, qname, parent)
+        if q: return q, tail
+    raise ImportError, "No module named " + qname
+
+def __load_tail__(q, tail):
+    m = q
+    while tail:
+        i = tail.find('.')
+        if i < 0: i = len(tail)
+        head, tail = tail[:i], tail[i+1:]
+        mname = "%s.%s" % (m.__name__, head)
+        m = __import_module__(head, mname, m)
+        if not m:
+            raise ImportError, "No module named " + mname
+    return m
+
+def __ensure_fromlist__(m, fromlist, recursive=0):
+    for sub in fromlist:
+        if sub == "*":
+            if not recursive:
+                try:
+                    all = m.__all__
+                except AttributeError:
+                    pass
+                else:
+                    __ensure_fromlist__(m, all, 1)
+            continue
+        if sub != "*" and not hasattr(m, sub):
+            subname = "%s.%s" % (m.__name__, sub)
+            submod = __import_module__(sub, subname, m)
+            if not submod:
+                raise ImportError, "No module named " + subname
+
+# Now we import all the yt.mods items.
+with mpi_import():
+    if MPI.COMM_WORLD.rank == 0: print "Beginning parallel import block."
+    from yt.mods import *
+    if MPI.COMM_WORLD.rank == 0: print "Ending parallel import block."


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -241,7 +241,8 @@
         tr[i] = ipnest
     return tr
 
-def arr_fisheye_vectors(int resolution, np.float64_t fov):
+def arr_fisheye_vectors(int resolution, np.float64_t fov, int nimx=1, int
+        nimy=1, int nimi=0, int nimj=0):
     # We now follow figures 4-7 of:
     # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
     # ...but all in Cython.
@@ -250,11 +251,13 @@
     cdef np.float64_t r, phi, theta, px, py
     cdef np.float64_t pi = 3.1415926
     cdef np.float64_t fov_rad = fov * pi / 180.0
-    vp = np.zeros((resolution, resolution, 3), dtype="float64")
-    for i in range(resolution):
-        px = 2.0 * i / (resolution) - 1.0
-        for j in range(resolution):
-            py = 2.0 * j / (resolution) - 1.0
+    cdef int nx = resolution/nimx
+    cdef int ny = resolution/nimy
+    vp = np.zeros((nx,ny, 3), dtype="float64")
+    for i in range(nx):
+        px = 2.0 * (nimi*nx + i) / (resolution) - 1.0
+        for j in range(ny):
+            py = 2.0 * (nimj*ny + j) / (resolution) - 1.0
             r = (px*px + py*py)**0.5
             if r == 0.0:
                 phi = 0.0


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -96,13 +96,14 @@
                         np.ndarray[np.float64_t, ndim=2] left_edges,
                         np.ndarray[np.float64_t, ndim=2] right_edges,
                         np.ndarray[np.int32_t, ndim=2] levels,
-                        np.ndarray[np.int32_t, ndim=1] mask):
+                        np.ndarray[np.int32_t, ndim=1] mask,
+                        int min_level = 0):
     cdef int i, n
     cdef int nx = left_edges.shape[0]
     cdef int inside 
     for i in range(nx):
         mask[i] = 0
-        if levels[i,0] <= level:
+        if levels[i,0] <= level and levels[i,0] >= min_level:
             inside = 1
             for n in range(3):
                 if left_edge[n] >= right_edges[i,n] or \


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -90,11 +90,12 @@
 
 class GetParameterFiles(argparse.Action):
     def __call__(self, parser, namespace, values, option_string = None):
+        print parser, namespace, values, option_string
         if len(values) == 1:
             pfs = values
         elif len(values) == 2 and namespace.basename is not None:
-            pfs = ["%s%04i" % (opts.basename, r)
-                   for r in range(int(values[0]), int(values[1]), opts.skip) ]
+            pfs = ["%s%04i" % (namespace.basename, r)
+                   for r in range(int(values[0]), int(values[1]), namespace.skip) ]
         else:
             pfs = values
         namespace.pf = [_fix_pf(pf) for pf in pfs]


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -1059,9 +1059,9 @@
 
     int n_fields = PyList_Size(oc_data);
     if(n_fields == 0) {
-      PyErr_Format(_dataCubeError,
+      /*PyErr_Format(_dataCubeError,
           "CombineGrids: Length zero for c_data is invalid.");
-      goto _fail;
+      goto _fail;*/
     }
     if (!PyList_Check(og_data) || (PyList_Size(og_data) != n_fields)){
       PyErr_Format(_dataCubeError,


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -348,7 +348,8 @@
             to_share[rstore.result_id] = rstore.result
         else:
             yield obj
-    communication_system.communicators.pop()
+    if parallel_capable:
+        communication_system.communicators.pop()
     if storage is not None:
         # Now we have to broadcast it
         new_storage = my_communicator.par_combine_object(


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -40,4 +40,4 @@
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
-    off_axis_projection
+    off_axis_projection, FisheyeCamera, MosaicFisheyeCamera


diff -r 9689723b537de098f146880d05e18724420e684f -r 4fe82421186ada168889384135c147ef9191a81b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -36,7 +36,7 @@
 from yt.visualization.image_writer import write_bitmap
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
+    ParallelAnalysisInterface, ProcessorPool
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from numpy import pi
 
@@ -797,8 +797,10 @@
     def __init__(self, center, radius, fov, resolution,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
-                 pf = None, no_ghost=False):
+                 pf = None, no_ghost=False, rotation = None):
         ParallelAnalysisInterface.__init__(self)
+        if rotation is None: rotation = na.eye(3)
+        self.rotation = rotation
         if pf is not None: self.pf = pf
         self.center = na.array(center, dtype='float64')
         self.radius = radius
@@ -825,6 +827,11 @@
         # ...but all in Cython.
         vp = arr_fisheye_vectors(self.resolution, self.fov)
         vp.shape = (self.resolution**2,1,3)
+        vp2 = vp.copy()
+        for i in range(3):
+            vp[:,:,i] = (vp2 * self.rotation[:,i]).sum(axis=2)
+        del vp2
+        vp *= self.radius
         uv = na.ones(3, dtype='float64')
         positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
         vector_plane = VectorPlane(positions, vp, self.center,
@@ -845,6 +852,242 @@
         image.shape = (self.resolution, self.resolution, 3)
         return image
 
+class MosaicFisheyeCamera(Camera):
+    def __init__(self, center, radius, fov, resolution,
+                 transfer_function = None, fields = None,
+                 sub_samples = 5, log_fields = None, volume = None,
+                 pf = None, l_max=None, no_ghost=False,nimx=1, nimy=1, procs_per_wg=None,
+                 rotation=None):
+        r"""A fisheye lens camera, taking adantage of image plane decomposition
+        for parallelism..
+
+        The camera represents the eye of an observer, which will be used to
+        generate ray-cast volume renderings of the domain. In this case, the
+        rays are defined by a fisheye lens
+
+        Parameters
+        ----------
+        center : array_like
+            The current "center" of the observer, from which the rays will be
+            cast.
+        radius : float
+            The radial distance to cast to.
+        resolution : int
+            The number of pixels in each direction.  Must be a single int.
+        volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
+            The volume to ray cast through.  Can be specified for finer-grained
+            control, but otherwise will be automatically generated.
+        fields : list of fields, optional
+            This is the list of fields we want to volume render; defaults to
+            Density.
+        log_fields : list of bool, optional
+            Whether we should take the log of the fields before supplying them to
+            the volume rendering mechanism.
+        sub_samples : int, optional
+            The number of samples to take inside every cell per ray.
+        pf : `~yt.data_objects.api.StaticOutput`
+            For now, this is a required parameter!  But in the future it will
+            become optional.  This is the parameter file to volume render.
+        l_max: int, optional
+            Specifies the maximum level to be rendered.  Also specifies the
+            maximum level used in the AMRKDTree construction.  Defaults to
+            None (all levels), and only applies when the default AMRKDTree
+            volume is used.
+        no_ghost: bool, optional
+            Optimization option.  If True, homogenized bricks will
+            extrapolate out from grid instead of interpolating from
+            ghost zones that have to first be calculated.  This can
+            lead to large speed improvements, but at a loss of
+            accuracy/smoothness in resulting image.  The effects are
+            less notable when the transfer function is smooth and
+            broad. Default: False
+        nimx: int, optional
+            The number of sub-images into which to decompose the image plane
+            in the x direction.  Must evenly divide the resolution.
+        nimy: int, optional
+            The number of sub-images into which to decompose the image plane
+            in the y direction.  Must evenly divide the resolution.
+        procs_per_wg: int, optional
+            The number of processors to use on each sub-image.  Within each
+            sub-image, the volume will be decomposed using the AMRKDTree with
+            procs_per_wg processors.
+
+        Notes
+        -----
+            The product nimx*nimy*procs_per_wg must be less than or equal to
+            the total number of MPI processes.
+
+            Unlike the non-Mosaic camera, this will only return each sub-image
+            to the root processor of each sub-image workgroup in order to save
+            memory.  To save the final image, one must then call
+            MosaicFisheyeCamera.save_image('filename')
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+
+        >>> pf = load('DD1717')
+
+        >>> N = 512 # Pixels (512^2 image)
+        >>> c = (pf.domain_right_edge + pf.domain_left_edge)/2. # Center
+        >>> radius = (pf.domain_right_edge - pf.domain_left_edge)/2.
+        >>> fov = 180.0
+
+        >>> field = 'Density'
+        >>> mi, ma = pf.h.all_data().quantities['Extrema']('Density')[0]
+        >>> mi, ma = na.log10(mi), na.log10(ma)
+
+        # You may want to comment out the above lines and manually set the
+        # min and max of the log of the Density field.  For example:
+        # mi, ma = -30.5, -26.5
+
+        # Another good place to center the camera is close to the maximum
+        # density:
+        # v, c = pf.h.find_max('Density')
+        # c -= 0.1*radius
+
+        # Construct the transfer function.
+        >>> tf = ColorTransferFunction((mi-1, ma+1), nbins=1024)
+
+        # Sample the transfer function with Nc gaussians.  Use the col_bounds
+        # keyword to limit the color range to the min and max values, rather
+        # than the transfer function bounds.
+        >>> Nc = 5
+        >>> tf.add_layers(Nc, w=0.005, col_bounds=(mi,ma),
+        ...               alpha=na.logspace(-2,0,Nc), colormap='RdBu_r')
+
+        # Create the camera object.  Use the keyword no_ghost=True if a lot
+        # of time is spent creating vertex-centered data.  Here we run on 8
+        # processors, splitting the image plane into 4 pieces with 2
+        # processors on each piece.
+        >>> cam = MosaicFisheyeCamera(c, radius, fov, N,
+        ...                           transfer_function=tf,
+        ...                           sub_samples=5,
+        ...                           pf=pf,
+        ...                           nimx=2, nimy=2, procs_per_wg=2)
+
+        # Take a snapshot.
+        >>> im = cam.snapshot()
+
+        # Save the image.
+        >>> cam.save_image('fisheye_mosaic.png')
+
+        """
+
+        ParallelAnalysisInterface.__init__(self)
+        PP = ProcessorPool()
+        if procs_per_wg is None:
+            # Divide the pool evenly among the nimx*nimy sub-images.
+            procs_per_wg = PP.size / (nimx * nimy)
+        for j in range(nimy):
+            for i in range(nimx):
+                PP.add_workgroup(size=procs_per_wg, name='%04i_%04i'%(i,j))
+                
+        my_wg = None
+        for wg in PP.workgroups:
+            if self.comm.rank in wg.ranks:
+                my_wg = wg
+        assert my_wg is not None, "Rank %i not in any workgroup" % self.comm.rank
+        
+        self.global_comm = self.comm
+        self.comm = my_wg.comm
+        self.wg = my_wg
+        self.imi = int(self.wg.name[0:4])
+        self.imj = int(self.wg.name[5:9])
+        mylog.info('My new communicator has the name %s', self.wg.name)
+
+        if pf is not None: self.pf = pf
+    
+        if rotation is None: rotation = na.eye(3)
+        self.rotation = rotation
+
+        if iterable(resolution):
+            raise RuntimeError("Resolution must be a single int")
+        self.resolution = resolution
+        self.nimx = nimx
+        self.nimy = nimy
+        self.center = na.array(center, dtype='float64')
+        self.radius = radius
+        self.fov = fov
+        if transfer_function is None:
+            transfer_function = ProjectionTransferFunction()
+        self.transfer_function = transfer_function
+        if fields is None: fields = ["Density"]
+        self.fields = fields
+        self.sub_samples = sub_samples
+        self.log_fields = log_fields
+        if volume is None:
+            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+                               log_fields=log_fields,l_max=l_max)
+        self.volume = volume
+
+    def snapshot(self):
+        # We now follow figures 4-7 of:
+        # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+        # ...but all in Cython.
+
+        vp = arr_fisheye_vectors(self.resolution, self.fov, self.nimx,
+                self.nimy, self.imi, self.imj)
+        vp2 = vp.copy()
+        for i in range(3):
+            vp[:,:,i] = (vp2 * self.rotation[:,i]).sum(axis=2)
+        del vp2
+ 
+        vp *= self.radius
+        nx, ny = vp.shape[0], vp.shape[1]
+        vp.shape = (nx*ny,1,3)
+        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
+        uv = na.ones(3, dtype='float64')
+        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+        vector_plane = VectorPlane(positions, vp, self.center,
+                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+        tfp = TransferFunctionProxy(self.transfer_function)
+        tfp.ns = self.sub_samples
+        self.volume.initialize_source()
+        mylog.info("Rendering fisheye of %s^2", self.resolution)
+        pbar = get_pbar("Ray casting",
+                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+
+        total_cells = 0
+        for brick in self.volume.traverse(None, self.center, image):
+            brick.cast_plane(tfp, vector_plane)
+            total_cells += na.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+        pbar.finish()
+        image.shape = (nx, ny, 3)
+
+        self.image = image
+       
+        return image
+
+    def save_image(self, fn):
+        if not fn.endswith('.png'):
+            fn = fn + '.png'
+
+        try:
+            image = self.image
+        except AttributeError:
+            mylog.error('You must first take a snapshot')
+            raise UserWarning('save_image() called before snapshot()')
+
+        nx, ny = self.resolution/self.nimx, self.resolution/self.nimy
+
+        if self.comm.rank == 0:
+            if self.global_comm.rank == 0:
+                final_image = na.empty((nx*self.nimx, 
+                    ny*self.nimy, 3),
+                    dtype='float64',order='C')
+                final_image[:nx, :ny, :] = image
+                for j in range(self.nimy):
+                    for i in range(self.nimx):
+                        if i==0 and j==0: continue
+                        src = self.wg.size * (j*self.nimx + i)
+                        arr = self.global_comm.recv_array(src, tag=src)
+
+                        final_image[i*nx:(i+1)*nx, j*ny:(j+1)*ny,:] = arr
+                        del arr
+                write_bitmap(final_image, fn)
+            else:
+                self.global_comm.send_array(image, 0, tag = self.global_comm.rank)
+        return
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, volume = None, no_ghost = True):

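Two pieces of the new camera code are worth unpacking. First, the rotation is
applied one output component at a time: (vp2 * rotation[:,i]).sum(axis=2)
projects every view vector onto column i of the rotation matrix, which is
equivalent to a single right-multiplication by the matrix. Second, save_image
places each nx-by-ny sub-image into the final mosaic with plain block
indexing. A minimal numpy sketch of both follows; all shapes and values are
illustrative, not taken from a real render:

    import numpy as na  # matching yt's "import numpy as na" convention

    # Rotation: the component-wise loop equals a single matrix product.
    res = 4
    vp = na.random.rand(res**2, 1, 3)  # stand-in for arr_fisheye_vectors output
    theta = na.pi / 6.0                # illustrative rotation about the z axis
    rotation = na.array([[na.cos(theta), -na.sin(theta), 0.0],
                         [na.sin(theta),  na.cos(theta), 0.0],
                         [0.0,            0.0,           1.0]])
    vp_loop = vp.copy()
    vp2 = vp.copy()
    for i in range(3):
        vp_loop[:,:,i] = (vp2 * rotation[:,i]).sum(axis=2)
    assert na.allclose(vp_loop, na.dot(vp, rotation))

    # Mosaic assembly: the block indexing used by save_image.
    nimx, nimy, nx, ny = 2, 2, 8, 8    # 2x2 decomposition of a 16^2 image
    final_image = na.empty((nx*nimx, ny*nimy, 3), dtype='float64')
    for j in range(nimy):
        for i in range(nimx):
            # Stand-in sub-image; in practice this arrives via recv_array.
            sub = na.ones((nx, ny, 3), dtype='float64') * (j*nimx + i)
            final_image[i*nx:(i+1)*nx, j*ny:(j+1)*ny, :] = sub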
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list