[yt-svn] commit/yt: 32 new changesets

commits-noreply at bitbucket.org
Thu Nov 28 06:47:44 PST 2013


32 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/476873d72b74/
Changeset:   476873d72b74
Branch:      yt-3.0
User:        ChrisMalone
Date:        2013-11-04 19:03:09
Summary:     properly catch Maestro files
Affected #:  1 file

diff -r 0ca385183a06b5fd4057a139ccc7e29db348124d -r 476873d72b746a2cac6e565cae70305c1a1f343f yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -717,12 +717,11 @@
                             args['fparam_filename'])
         if not os.path.exists(jobinfo_filename):
             return False
-        if not os.path.exists(fparam_filename):
+        if os.path.exists(fparam_filename):
             return False
-        # Now we check for all the others
+        # Now we check the job_info for the mention of maestro
         lines = open(jobinfo_filename).readlines()
-        # Maestro outputs have "Castro" in them
-        if any(line.startswith("Castro   ") for line in lines): return True
+        if any("maestro" in line.lower() for line in lines): return True
         return False
 
 

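Taken together with the next two changesets, the Maestro check reduces to scanning the job_info file for the code name. A minimal standalone sketch of that end state (the job_info path handling is assumed here, not taken from the diff):

import os

def looks_like_maestro(jobinfo_filename):
    # Treat a plotfile as Maestro output if its job_info mentions
    # "maestro" anywhere, case-insensitively.
    if not os.path.exists(jobinfo_filename):
        return False
    lines = open(jobinfo_filename).readlines()
    return any("maestro" in line.lower() for line in lines)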

https://bitbucket.org/yt_analysis/yt/commits/d0c398ddec51/
Changeset:   d0c398ddec51
Branch:      yt-3.0
User:        ChrisMalone
Date:        2013-11-04 19:24:42
Summary:     actually, Castro and Maestro don't care about an fparam file
Affected #:  1 file

diff -r 476873d72b746a2cac6e565cae70305c1a1f343f -r d0c398ddec51ac01d55d95e0257148ec5b884569 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -685,15 +685,8 @@
         if not os.path.exists(header_filename):
             # We *know* it's not boxlib if Header doesn't exist.
             return False
-        args = inspect.getcallargs(cls.__init__, args, kwargs)
-        # This might need to be localized somehow
-        fparam_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['fparam_filename'])
         if not os.path.exists(jobinfo_filename):
             return False
-        if os.path.exists(fparam_filename):
-            return False
         # Now we check for all the others
         lines = open(jobinfo_filename).readlines()
         if any(line.startswith("Castro   ") for line in lines): return True
@@ -710,15 +703,8 @@
         if not os.path.exists(header_filename):
             # We *know* it's not boxlib if Header doesn't exist.
             return False
-        args = inspect.getcallargs(cls.__init__, args, kwargs)
-        # This might need to be localized somehow
-        fparam_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['fparam_filename'])
         if not os.path.exists(jobinfo_filename):
             return False
-        if os.path.exists(fparam_filename):
-            return False
         # Now we check the job_info for the mention of maestro
         lines = open(jobinfo_filename).readlines()
         if any("maestro" in line.lower() for line in lines): return True


https://bitbucket.org/yt_analysis/yt/commits/67fab8ea9ea6/
Changeset:   67fab8ea9ea6
Branch:      yt-3.0
User:        ChrisMalone
Date:        2013-11-04 19:28:35
Summary:     make sure Orion isn't confused with Maestro
Affected #:  1 file

diff -r d0c398ddec51ac01d55d95e0257148ec5b884569 -r 67fab8ea9ea6841dae5b9000537f2ed4575fff3d yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -671,6 +671,7 @@
         lines = open(inputs_filename).readlines()
         if any(("castro." in line for line in lines)): return False
         if any(("nyx." in line for line in lines)): return False
+        if any(("maestro" in line.lower() for line in lines)): return False
         if any(("geometry.prob_lo" in line for line in lines)): return True
         return False
 


https://bitbucket.org/yt_analysis/yt/commits/4767e46461c0/
Changeset:   4767e46461c0
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-11-07 00:53:03
Summary:     Fixes for active particle i/o.
Affected #:  2 files

diff -r 67fab8ea9ea6841dae5b9000537f2ed4575fff3d -r 4767e46461c0bb35e0190155a23aaba95d6d7ce8 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -122,7 +122,10 @@
     @property
     def NumberOfActiveParticles(self):
         if not hasattr(self.hierarchy, "grid_active_particle_count"): return 0
-        return self.hierarchy.grid_active_particle_count[self.id - self._id_offset]
+        id = self.id - self._id_offset
+        nap = dict((ptype, self.hierarchy.grid_active_particle_count[ptype][id]) \
+                   for ptype in self.hierarchy.grid_active_particle_count)
+        return nap
 
 class EnzoGridInMemory(EnzoGrid):
     __slots__ = ['proc_num']
@@ -322,11 +325,9 @@
         super(EnzoHierarchy, self)._initialize_grid_arrays()
         if "AppendActiveParticleType" in self.parameters.keys() and \
                 len(self.parameters["AppendActiveParticleType"]):
-            pdtype = [(ptype, 'i4') for ptype in
-                self.parameters["AppendActiveParticleType"]]
-        else:
-            pdtype = None
-        self.grid_active_particle_count = np.zeros(self.num_grids, dtype=pdtype)
+            gac = dict((ptype, np.zeros(self.num_grids, dtype='i4')) \
+                       for ptype in self.parameters["AppendActiveParticleType"])
+            self.grid_active_particle_count = gac
 
     def _fill_arrays(self, ei, si, LE, RE, npart, nap):
         self.grid_dimensions.flat[:] = ei

diff -r 67fab8ea9ea6841dae5b9000537f2ed4575fff3d -r 4767e46461c0bb35e0190155a23aaba95d6d7ce8 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -63,7 +63,9 @@
                 if f is None:
                     #print "Opening (count) %s" % g.filename
                     f = h5py.File(g.filename, "r")
-                if g.NumberOfParticles == 0: continue
+                nap = sum(g.NumberOfActiveParticles.values())
+                if g.NumberOfParticles == 0 and nap == 0:
+                    continue
                 ds = f.get("/Grid%08i" % g.id)
                 for ptype, field_list in sorted(ptf.items()):
                     if ptype != "io":
@@ -87,7 +89,9 @@
                 if f is None:
                     #print "Opening (read) %s" % g.filename
                     f = h5py.File(g.filename, "r")
-                if g.NumberOfParticles == 0: continue
+                nap = sum(g.NumberOfActiveParticles.values())
+                if g.NumberOfParticles == 0 and nap == 0:
+                    continue
                 ds = f.get("/Grid%08i" % g.id)
                 for ptype, field_list in sorted(ptf.items()):
                     if ptype != "io":

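For reference, the shape of the new bookkeeping in isolation (the particle type names below are made up; only the structure matches the diff):

import numpy as np

num_grids = 4
gac = dict((ptype, np.zeros(num_grids, dtype='i4'))
           for ptype in ('AccretingParticle', 'SinkParticle'))
grid_id = 2
nap = dict((ptype, gac[ptype][grid_id]) for ptype in gac)
total = sum(nap.values())   # i/o now skips a grid only when this is zero too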

https://bitbucket.org/yt_analysis/yt/commits/f5643d2a09f8/
Changeset:   f5643d2a09f8
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-11-08 01:54:40
Summary:     Ensure that active particle entries in particle_list do not get clobbered.
Affected #:  1 file

diff -r 4767e46461c0bb35e0190155a23aaba95d6d7ce8 -r f5643d2a09f83ba186599100c3fbcbdbb6329677 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -839,14 +839,14 @@
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
         self.particle_types = ["io"]
-        for ptype in self.parameters.get("AppendActiveParticleType", []):
-            self.particle_types.append(ptype)
         if self.parameters["NumberOfParticles"] > 0 and \
             "AppendActiveParticleType" in self.parameters.keys():
             # If this is the case, then we know we should have a DarkMatter
             # particle type, and we don't need the "io" type.
             self.particle_types = ["DarkMatter"]
             self.parameters["AppendActiveParticleType"].append("DarkMatter")
+        for ptype in self.parameters.get("AppendActiveParticleType", []):
+            self.particle_types.append(ptype)
         self.particle_types = tuple(self.particle_types)
 
         if self.dimensionality == 1:


https://bitbucket.org/yt_analysis/yt/commits/4a304d01aef9/
Changeset:   4a304d01aef9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-08 19:44:57
Summary:     Changing return value to {} from 0 for NumberOfActiveParticles.
Affected #:  1 file

diff -r f5643d2a09f83ba186599100c3fbcbdbb6329677 -r 4a304d01aef985b55fc8ce9c0a6706b308c3412f yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -121,7 +121,7 @@
 
     @property
     def NumberOfActiveParticles(self):
-        if not hasattr(self.hierarchy, "grid_active_particle_count"): return 0
+        if not hasattr(self.hierarchy, "grid_active_particle_count"): return {}
         id = self.id - self._id_offset
         nap = dict((ptype, self.hierarchy.grid_active_particle_count[ptype][id]) \
                    for ptype in self.hierarchy.grid_active_particle_count)


https://bitbucket.org/yt_analysis/yt/commits/d8284f61124e/
Changeset:   d8284f61124e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-12 15:56:29
Summary:     Adding periodic (default: False) to to_frb.
Affected #:  1 file

diff -r 4a304d01aef985b55fc8ce9c0a6706b308c3412f -r d8284f61124e18ad74e43f7b581755c7776441ab yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -661,7 +661,8 @@
         return pw
 
 
-    def to_frb(self, width, resolution, center=None, height=None):
+    def to_frb(self, width, resolution, center=None, height=None,
+               periodic = False):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
 
@@ -686,6 +687,9 @@
         center : array-like of floats, optional
             The center of the FRB.  If not specified, defaults to the center of
             the current object.
+        periodic : bool
+            Should the returned Fixed Resolution Buffer be periodic?  (default:
+            False).
 
         Returns
         -------
@@ -726,7 +730,8 @@
         yax = y_dict[self.axis]
         bounds = (center[xax] - width*0.5, center[xax] + width*0.5,
                   center[yax] - height*0.5, center[yax] + height*0.5)
-        frb = FixedResolutionBuffer(self, bounds, resolution)
+        frb = FixedResolutionBuffer(self, bounds, resolution,
+                                    periodic = periodic)
         return frb
 
 class YTSelectionContainer3D(YTSelectionContainer):

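A hypothetical use of the new keyword (the dataset name, width, and field setup are assumed):

from yt.mods import load
pf = load('RedshiftOutput0005')
sl = pf.h.slice(2, 0.5)
frb = sl.to_frb(1.0, 512, periodic=True)   # wrap sampling across domain edges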

https://bitbucket.org/yt_analysis/yt/commits/5d18ef99633e/
Changeset:   5d18ef99633e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-14 13:50:24
Summary:     Fixing a regression in off axis cutting planes.
Affected #:  1 file

diff -r d8284f61124e18ad74e43f7b581755c7776441ab -r 5d18ef99633efa85f5ea39c63d1509554b759eb8 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -359,7 +359,7 @@
             x = self._current_chunk.fcoords[:,0] - self.center[0]
             y = self._current_chunk.fcoords[:,1] - self.center[1]
             z = self._current_chunk.fcoords[:,2] - self.center[2]
-            tr = np.zeros(self.size, dtype='float64')
+            tr = np.zeros(x.size, dtype='float64')
             tr += x * self._x_vec[0]
             tr += y * self._x_vec[1]
             tr += z * self._x_vec[2]
@@ -368,7 +368,7 @@
             x = self._current_chunk.fcoords[:,0] - self.center[0]
             y = self._current_chunk.fcoords[:,1] - self.center[1]
             z = self._current_chunk.fcoords[:,2] - self.center[2]
-            tr = np.zeros(self.size, dtype='float64')
+            tr = np.zeros(x.size, dtype='float64')
             tr += x * self._y_vec[0]
             tr += y * self._y_vec[1]
             tr += z * self._y_vec[2]
@@ -377,7 +377,7 @@
             x = self._current_chunk.fcoords[:,0] - self.center[0]
             y = self._current_chunk.fcoords[:,1] - self.center[1]
             z = self._current_chunk.fcoords[:,2] - self.center[2]
-            tr = np.zeros(self.size, dtype='float64')
+            tr = np.zeros(x.size, dtype='float64')
             tr += x * self._norm_vec[0]
             tr += y * self._norm_vec[1]
             tr += z * self._norm_vec[2]

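The underlying issue, in isolation: these expressions are evaluated one chunk at a time, so the scratch array has to be sized from the chunk's own coordinates rather than from the whole container. A sketch with illustrative values:

import numpy as np

fcoords = np.random.random((128, 3))     # one chunk's cell coordinates
center = np.array([0.5, 0.5, 0.5])
x = fcoords[:, 0] - center[0]
tr = np.zeros(x.size, dtype='float64')   # always matches this chunk's length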

https://bitbucket.org/yt_analysis/yt/commits/28de35a5f913/
Changeset:   28de35a5f913
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-11-15 05:50:08
Summary:     Preventing a segfault due to garbage domain dimensions.
Affected #:  1 file

diff -r 5d18ef99633efa85f5ea39c63d1509554b759eb8 -r 28de35a5f9134a31546184b96913c53b1e474887 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -174,6 +174,8 @@
         self._unit_base = unit_base
         if bounding_box is not None:
             bbox = np.array(bounding_box, dtype="float64")
+            if bbox.shape == (2, 3):
+                bbox = bbox.transpose()
             self.domain_left_edge = bbox[:,0]
             self.domain_right_edge = bbox[:,1]
         else:

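In sketch form, the normalization this adds (values illustrative):

import numpy as np

bounding_box = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]   # rows are the two edges
bbox = np.array(bounding_box, dtype='float64')
if bbox.shape == (2, 3):
    bbox = bbox.transpose()          # columns become the edges: shape (3, 2)
domain_left_edge = bbox[:, 0]
domain_right_edge = bbox[:, 1]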

https://bitbucket.org/yt_analysis/yt/commits/a65e2f8c6457/
Changeset:   a65e2f8c6457
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-15 20:29:01
Summary:     Fixing LeftEdge and RightEdge for octree subsets.
Affected #:  1 file

diff -r 28de35a5f9134a31546184b96913c53b1e474887 -r a65e2f8c645755837f9016363ef470ec71e37371 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -272,13 +272,13 @@
     @property
     def LeftEdge(self):
         LE = (self._fcoords[0,0,0,self.ind,:]
-            - self._fwidth[0,0,0,self.ind,:])*0.5
+            - self._fwidth[0,0,0,self.ind,:]*0.5)
         return LE
 
     @property
     def RightEdge(self):
         RE = (self._fcoords[-1,-1,-1,self.ind,:]
-            + self._fwidth[-1,-1,-1,self.ind,:])*0.5
+            + self._fwidth[-1,-1,-1,self.ind,:]*0.5)
         return RE
 
     @property

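The fix is purely a parenthesization change: for a cell with center c and width w, the edges are c - w/2 and c + w/2, while the old code computed (c - w) * 0.5. A quick numeric check:

c, w = 0.75, 0.5
old_left = (c - w) * 0.5    # 0.125 -- not an edge of this cell
new_left = c - w * 0.5      # 0.5, the actual left edge
new_right = c + w * 0.5     # 1.0, the actual right edge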

https://bitbucket.org/yt_analysis/yt/commits/3b4d8dc5cb0d/
Changeset:   3b4d8dc5cb0d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-23 17:10:36
Summary:     Fixing smoothing to += rather than = .  Normalize by weight.

Also adding find_max to generic geometry_handler.
Affected #:  2 files

diff -r 4c112d5ad497f5f4c000530939d03ed4dde34cc1 -r 3b4d8dc5cb0d8c33c1775d28d8c483eb6eda1e3e yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -459,6 +459,18 @@
             del self._data_file
             self._data_file = None
 
+    def find_max(self, field):
+        """
+        Returns (value, center) of location of maximum for a given field.
+        """
+        mylog.debug("Searching for maximum value of %s", field)
+        source = self.all_data()
+        max_val, maxi, mx, my, mz = \
+            source.quantities["MaxLocation"](field)
+        mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
+              max_val, mx, my, mz)
+        return max_val, np.array([mx, my, mz], dtype="float64")
+
     def _add_object_class(self, name, class_name, base, dd):
         self.object_types.append(name)
         obj = type(class_name, (base,), dd)
@@ -674,3 +686,4 @@
         g = self.queue.pop(0)
         g._initialize_cache(self.cache.pop(g.id, {}))
         return g
+

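Hypothetical usage of the new helper (the dataset and field names are assumed):

from yt.mods import load
pf = load('RedshiftOutput0005')
val, center = pf.h.find_max('Density')   # (max value, float64 array [x, y, z])
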
diff -r 4c112d5ad497f5f4c000530939d03ed4dde34cc1 -r 3b4d8dc5cb0d8c33c1775d28d8c483eb6eda1e3e yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -333,8 +333,9 @@
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef int n, fi
-        cdef np.float64_t weight, r2, val
+        cdef np.float64_t weight, r2, val, norm
         cdef np.int64_t pn
+        # rho_i = sum(j = 1 .. n) m_j * W_ij
         for n in range(self.curn):
             # No normalization for the moment.
             # fields[0] is the smoothing length.
@@ -345,7 +346,10 @@
             # Mass of the particle times the value divided by the Density
             for fi in range(self.nfields - 3):
                 val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
-                self.fp[fi + 3][gind(i,j,k,dim) + offset] = val * weight
+                self.fp[fi + 3][gind(i,j,k,dim) + offset] += val * weight
+            norm += weight
+        for fi in range(self.nfields - 3):
+            self.fp[fi + 3][gind(i,j,k,dim) + offset] /= norm
         return
 
 simple_neighbor_smooth = SimpleNeighborSmooth

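The same accumulate-then-normalize pattern, written out as a plain Python sketch (names are illustrative, not the yt API):

def smooth_cell(weights, masses, densities, values):
    # Weighted average: sum_j w_j * m_j * v_j / rho_j, divided by sum_j w_j.
    out = 0.0
    norm = 0.0                       # starts at zero before accumulating
    for w, m, rho, v in zip(weights, masses, densities, values):
        out += w * m * v / rho       # += rather than =, the crux of the fix
        norm += w
    return out / norm

print smooth_cell([0.5, 0.5], [1.0, 2.0], [1.0, 1.0], [3.0, 5.0])   # 6.5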

https://bitbucket.org/yt_analysis/yt/commits/9b320447c009/
Changeset:   9b320447c009
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-21 16:48:13
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #122)

Fixing smoothing to += rather than = .  Normalize by weight.
Affected #:  2 files

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 9b320447c00940ac65dfa6839f718326321bab23 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -465,6 +465,18 @@
             del self._data_file
             self._data_file = None
 
+    def find_max(self, field):
+        """
+        Returns (value, center) of location of maximum for a given field.
+        """
+        mylog.debug("Searching for maximum value of %s", field)
+        source = self.all_data()
+        max_val, maxi, mx, my, mz = \
+            source.quantities["MaxLocation"](field)
+        mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
+              max_val, mx, my, mz)
+        return max_val, np.array([mx, my, mz], dtype="float64")
+
     def _add_object_class(self, name, class_name, base, dd):
         self.object_types.append(name)
         obj = type(class_name, (base,), dd)
@@ -680,3 +692,4 @@
         g = self.queue.pop(0)
         g._initialize_cache(self.cache.pop(g.id, {}))
         return g
+

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 9b320447c00940ac65dfa6839f718326321bab23 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -333,8 +333,9 @@
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef int n, fi
-        cdef np.float64_t weight, r2, val
+        cdef np.float64_t weight, r2, val, norm
         cdef np.int64_t pn
+        # rho_i = sum(j = 1 .. n) m_j * W_ij
         for n in range(self.curn):
             # No normalization for the moment.
             # fields[0] is the smoothing length.
@@ -345,7 +346,10 @@
             # Mass of the particle times the value divided by the Density
             for fi in range(self.nfields - 3):
                 val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
-                self.fp[fi + 3][gind(i,j,k,dim) + offset] = val * weight
+                self.fp[fi + 3][gind(i,j,k,dim) + offset] += val * weight
+            norm += weight
+        for fi in range(self.nfields - 3):
+            self.fp[fi + 3][gind(i,j,k,dim) + offset] /= norm
         return
 
 simple_neighbor_smooth = SimpleNeighborSmooth


https://bitbucket.org/yt_analysis/yt/commits/41070b7758d5/
Changeset:   41070b7758d5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-15 20:48:39
Summary:     This should eliminate segfaults for particles that are OOB.
Affected #:  3 files

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 41070b7758d557be6c0b40457b2fcf4ea30ced9c yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -116,6 +116,12 @@
             dt = ds.dtype.newbyteorder("N") # Native
             pos = np.empty(ds.shape, dtype=dt)
             pos[:] = ds
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
             regions.add_data_file(pos, data_file.file_id)
             morton[ind:ind+pos.shape[0]] = compute_morton(
                 pos[:,0], pos[:,1], pos[:,2],

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 41070b7758d557be6c0b40457b2fcf4ea30ced9c yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -198,6 +198,7 @@
         free_octs(self.cont)
         if self.root_mesh == NULL: return
         for i in range(self.nn[0]):
+            if self.root_mesh[i] == NULL: continue
             for j in range(self.nn[1]):
                 if self.root_mesh[i][j] == NULL: continue
                 free(self.root_mesh[i][j])

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 41070b7758d557be6c0b40457b2fcf4ea30ced9c yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -47,9 +47,13 @@
         #Call the freemem ops on every oct
         #of the root mesh recursively
         cdef i, j, k
+        if self.root_mesh == NULL: return
         for i in range(self.nn[0]):
+            if self.root_mesh[i] == NULL: continue
             for j in range(self.nn[1]):
+                if self.root_mesh[i][j] == NULL: continue
                 for k in range(self.nn[2]):
+                    if self.root_mesh[i][j][k] == NULL: continue
                     self.visit_free(self.root_mesh[i][j][k])
         free(self.oct_list)
         free(self.dom_offsets)

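The guard, reduced to its essentials (YTDomainOverflow is the exception the diff raises; the import path and function below are assumptions):

import numpy as np
from yt.utilities.exceptions import YTDomainOverflow

def assert_in_domain(pos, left_edge, right_edge):
    pmin, pmax = pos.min(axis=0), pos.max(axis=0)
    if np.any(pmin < left_edge) or np.any(pmax > right_edge):
        raise YTDomainOverflow(pmin, pmax, left_edge, right_edge)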

https://bitbucket.org/yt_analysis/yt/commits/cade81eb699d/
Changeset:   cade81eb699d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-15 21:01:27
Summary:     Removing unnecessary pos allocation and adding domain bounds to GadgetBinary.
Affected #:  1 file

diff -r 41070b7758d557be6c0b40457b2fcf4ea30ced9c -r cade81eb699d38da8e83fa1b207f8c5c37978b18 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -280,13 +280,18 @@
         DLE = data_file.pf.domain_left_edge
         DRE = data_file.pf.domain_right_edge
         dx = (DRE - DLE) / 2**_ORDER_MAX
-        pos = np.empty((count, 3), dtype='float64')
         with open(data_file.filename, "rb") as f:
             # We add an additional 4 bytes for the first record.
             f.seek(data_file._position_offset + 4)
             # The first total_particles * 3 values are positions
             pp = np.fromfile(f, dtype = 'float32', count = count*3)
             pp.shape = (count, 3)
+            if np.any(pp.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pp.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pp.min(axis=0),
+                                       pp.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
         regions.add_data_file(pp, data_file.file_id)
         morton = compute_morton(pp[:,0], pp[:,1], pp[:,2], DLE, DRE)
         return morton


https://bitbucket.org/yt_analysis/yt/commits/74cf7559aee1/
Changeset:   74cf7559aee1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-21 16:49:05
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #133)

This should eliminate segfaults for particles that are OOB.
Affected #:  3 files

diff -r 9b320447c00940ac65dfa6839f718326321bab23 -r 74cf7559aee16c5eb5128410500c8cb0f3b8fd40 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -116,6 +116,12 @@
             dt = ds.dtype.newbyteorder("N") # Native
             pos = np.empty(ds.shape, dtype=dt)
             pos[:] = ds
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
             regions.add_data_file(pos, data_file.file_id)
             morton[ind:ind+pos.shape[0]] = compute_morton(
                 pos[:,0], pos[:,1], pos[:,2],
@@ -274,13 +280,18 @@
         DLE = data_file.pf.domain_left_edge
         DRE = data_file.pf.domain_right_edge
         dx = (DRE - DLE) / 2**_ORDER_MAX
-        pos = np.empty((count, 3), dtype='float64')
         with open(data_file.filename, "rb") as f:
             # We add an additional 4 bytes for the first record.
             f.seek(data_file._position_offset + 4)
             # The first total_particles * 3 values are positions
             pp = np.fromfile(f, dtype = 'float32', count = count*3)
             pp.shape = (count, 3)
+            if np.any(pp.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pp.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pp.min(axis=0),
+                                       pp.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
         regions.add_data_file(pp, data_file.file_id)
         morton = compute_morton(pp[:,0], pp[:,1], pp[:,2], DLE, DRE)
         return morton

diff -r 9b320447c00940ac65dfa6839f718326321bab23 -r 74cf7559aee16c5eb5128410500c8cb0f3b8fd40 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -198,6 +198,7 @@
         free_octs(self.cont)
         if self.root_mesh == NULL: return
         for i in range(self.nn[0]):
+            if self.root_mesh[i] == NULL: continue
             for j in range(self.nn[1]):
                 if self.root_mesh[i][j] == NULL: continue
                 free(self.root_mesh[i][j])

diff -r 9b320447c00940ac65dfa6839f718326321bab23 -r 74cf7559aee16c5eb5128410500c8cb0f3b8fd40 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -47,9 +47,13 @@
         #Call the freemem ops on every oct
         #of the root mesh recursively
         cdef i, j, k
+        if self.root_mesh == NULL: return
         for i in range(self.nn[0]):
+            if self.root_mesh[i] == NULL: continue
             for j in range(self.nn[1]):
+                if self.root_mesh[i][j] == NULL: continue
                 for k in range(self.nn[2]):
+                    if self.root_mesh[i][j][k] == NULL: continue
                     self.visit_free(self.root_mesh[i][j][k])
         free(self.oct_list)
         free(self.dom_offsets)


https://bitbucket.org/yt_analysis/yt/commits/b0a426d61bca/
Changeset:   b0a426d61bca
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-17 00:51:06
Summary:     Re-enable Enzo caching.
Affected #:  1 file

diff -r 5d18ef99633efa85f5ea39c63d1509554b759eb8 -r b0a426d61bcab4157407e3abb781e815e9f4bbba yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -154,6 +154,29 @@
             if fid: fid.close()
         return rv
 
+    def _read_chunk_data(self, chunk, fields):
+        fid = fn = None
+        rv = {}
+        mylog.debug("Preloading fields %s", fields)
+        for g in chunk.objs:
+            rv[g.id] = gf = {}
+            if g.filename is None: continue
+            elif g.filename != fn:
+                if fid is not None: fid.close()
+                fid = None
+            if fid is None:
+                fid = h5py.h5f.open(g.filename, h5py.h5f.ACC_RDONLY)
+                fn = g.filename
+            data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
+            data_view = data.swapaxes(0,2)
+            for field in fields:
+                ftype, fname = field
+                dg = h5py.h5d.open(fid, "/Grid%08i/%s" % (g.id, fname))
+                dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+                gf[field] = data_view.copy()
+        if fid: fid.close()
+        return rv
+
 class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
     _data_style = "enzo_packed_3d_gz"
 

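The preload uses h5py's low-level interface: one reusable C-ordered buffer per grid, shaped with the grid dimensions reversed, read in place and then stored as a transposed copy. In isolation (file name, grid id, field name, and dimensions assumed):

import h5py
import numpy as np

fid = h5py.h5f.open('data0001.cpu0000', h5py.h5f.ACC_RDONLY)
data = np.empty((16, 16, 16), dtype='float64')    # ActiveDimensions reversed
dg = h5py.h5d.open(fid, '/Grid00000001/Density')
dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)         # fill the buffer in place
cached = data.swapaxes(0, 2).copy()               # x-fastest view, copied out
fid.close()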

https://bitbucket.org/yt_analysis/yt/commits/38d2692e328d/
Changeset:   38d2692e328d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-17 00:51:25
Summary:     Enable FLASH caching.
Affected #:  2 files

diff -r b0a426d61bcab4157407e3abb781e815e9f4bbba -r 38d2692e328d62d0641173bbaff1bbebf5de4993 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -53,6 +53,7 @@
 class FLASHHierarchy(GridGeometryHandler):
 
     grid = FLASHGrid
+    _preload_implemented = True
     
     def __init__(self,pf,data_style='flash_hdf5'):
         self.data_style = data_style

diff -r b0a426d61bcab4157407e3abb781e815e9f4bbba -r 38d2692e328d62d0641173bbaff1bbebf5de4993 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -85,3 +85,18 @@
                     data = ds[g.id - g._id_offset,:,:,:].transpose()
                     ind += g.select(selector, data, rv[field], ind) # caches
         return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        f = self._handle
+        rv = {}
+        for g in chunk.objs:
+            rv[g.id] = {}
+        for field in fields:
+            ftype, fname = field
+            ds = f["/%s" % fname]
+            ind = 0
+            for g in chunk.objs:
+                data = ds[g.id - g._id_offset,:,:,:].transpose()
+                rv[g.id][field] = data
+        return rv
+


https://bitbucket.org/yt_analysis/yt/commits/bbb78058f4f1/
Changeset:   bbb78058f4f1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-21 16:54:29
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #134)

Re-enable spatial caching for Enzo and implement it for FLASH
Affected #:  3 files

diff -r 74cf7559aee16c5eb5128410500c8cb0f3b8fd40 -r bbb78058f4f1107c257cb6652b6b1feebb06c467 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -154,6 +154,29 @@
             if fid: fid.close()
         return rv
 
+    def _read_chunk_data(self, chunk, fields):
+        fid = fn = None
+        rv = {}
+        mylog.debug("Preloading fields %s", fields)
+        for g in chunk.objs:
+            rv[g.id] = gf = {}
+            if g.filename is None: continue
+            elif g.filename != fn:
+                if fid is not None: fid.close()
+                fid = None
+            if fid is None:
+                fid = h5py.h5f.open(g.filename, h5py.h5f.ACC_RDONLY)
+                fn = g.filename
+            data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
+            data_view = data.swapaxes(0,2)
+            for field in fields:
+                ftype, fname = field
+                dg = h5py.h5d.open(fid, "/Grid%08i/%s" % (g.id, fname))
+                dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+                gf[field] = data_view.copy()
+        if fid: fid.close()
+        return rv
+
 class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
     _data_style = "enzo_packed_3d_gz"
 

diff -r 74cf7559aee16c5eb5128410500c8cb0f3b8fd40 -r bbb78058f4f1107c257cb6652b6b1feebb06c467 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -53,6 +53,7 @@
 class FLASHHierarchy(GridGeometryHandler):
 
     grid = FLASHGrid
+    _preload_implemented = True
     
     def __init__(self,pf,data_style='flash_hdf5'):
         self.data_style = data_style

diff -r 74cf7559aee16c5eb5128410500c8cb0f3b8fd40 -r bbb78058f4f1107c257cb6652b6b1feebb06c467 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -85,3 +85,18 @@
                     data = ds[g.id - g._id_offset,:,:,:].transpose()
                     ind += g.select(selector, data, rv[field], ind) # caches
         return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        f = self._handle
+        rv = {}
+        for g in chunk.objs:
+            rv[g.id] = {}
+        for field in fields:
+            ftype, fname = field
+            ds = f["/%s" % fname]
+            ind = 0
+            for g in chunk.objs:
+                data = ds[g.id - g._id_offset,:,:,:].transpose()
+                rv[g.id][field] = data
+        return rv
+


https://bitbucket.org/yt_analysis/yt/commits/814c19f73b48/
Changeset:   814c19f73b48
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 22:04:03
Summary:     Merging from yt-2.x
Affected #:  53 files

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
 include distribute_setup.py README* CREDITS COPYING.txt CITATION
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE kdtree2-README
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there!  You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses.  It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there!  You've just downloaded yt, an analysis tool for scientific
+datasets, generated on a variety of data platforms.  It's written in 
+python and heavily leverages both NumPy and Matplotlib for fast arrays and 
+visualization, respectively.
 
 Full documentation and a user community can be found at:
 
 http://yt-project.org/
+
 http://yt-project.org/doc/
 
 If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
 doc/install_script.sh .  You will have to set the destination directory, and
 there are options available, but it should be straightforward.
 
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or 
+ways to help development, please visit our website.
 
 Enjoy!

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
 get_ytdata xray_emissivity.h5
 
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
@@ -918,6 +923,8 @@
 do_setup_py $SYMPY
 [ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
+( ${DEST_DIR}/bin/pip install jinja2 2>&1 ) 1>> ${LOG_FILE}
+
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -86,6 +86,10 @@
     #Empty fit without any lines
     yFit = na.ones(len(fluxData))
 
+    #Force the first and last flux pixel to be 1 to prevent OOB
+    fluxData[0]=1
+    fluxData[-1]=1
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -120,9 +124,10 @@
                     z,fitLim,minError*(b[2]-b[1]),speciesDict)
 
             #Check existence of partner lines if applicable
-            newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                    b, minError*(b[2]-b[1]),
-                    x0, xRes, speciesDict)
+            if len(speciesDict['wavelength']) != 1:
+                newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
+                        b, minError*(b[2]-b[1]),
+                        x0, xRes, speciesDict)
 
             #If flagged as a bad fit, species is lyman alpha,
             #   and it may be a saturated line, use special tools
@@ -548,6 +553,10 @@
         #Index of the redshifted wavelength
         indexRedWl = (redWl-x0)/xRes
 
+        #Check to see if even in flux range
+        if indexRedWl > len(y):
+            return False
+
         #Check if surpasses minimum absorption bound
         if y[int(indexRedWl)]>fluxMin:
             return False

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,16 @@
 from .radmc3d_export.api import \
     RadMC3DWriter
 
+from .particle_trajectories.api import \
+    ParticleTrajectories
+
+from .photon_simulator.api import \
+     PhotonList, \
+     EventList, \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel, \
+     PhotonModel, \
+     ThermalPhotonModel

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
         cosmology_splice = []
-
+ 
+        if near_redshift == far_redshift:
+            self.simulation.get_time_series(redshifts=[near_redshift])
+            cosmology_splice.append({'time': self.simulation[0].current_time,
+                                     'redshift': self.simulation[0].current_redshift,
+                                     'filename': os.path.join(self.simulation[0].fullpath,
+                                                              self.simulation[0].basename),
+                                     'next': None})
+            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+                       (cosmology_splice[0]['filename'], near_redshift))
+            return cosmology_splice
+        
         # Use minimum number of datasets to go from z_i to z_f.
         if minimal:
 

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
     only_on_root, \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import \
+     speed_of_light_cgs, \
+     cm_per_km
 
 class LightRay(CosmologySplice):
     """
@@ -51,7 +54,9 @@
     near_redshift : float
         The near (lowest) redshift for the light ray.
     far_redshift : float
-        The far (highest) redshift for the light ray.
+        The far (highest) redshift for the light ray.  NOTE: in order 
+        to use only a single dataset in a light ray, set the 
+        near_redshift and far_redshift to be the same.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
@@ -111,65 +116,92 @@
                                        time_data=time_data,
                                        redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, filename=None):
+    def _calculate_light_ray_solution(self, seed=None, 
+                                      start_position=None, end_position=None,
+                                      trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
         np.random.seed(seed)
 
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
+        # If using only one dataset, set start and stop manually.
+        if start_position is not None:
+            if len(self.light_ray_solution) > 1:
+                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+            if not ((end_position is None) ^ (trajectory is None)):
+                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if end_position is not None:
+                self.light_ray_solution[0]['end'] = np.array(end_position)
+            else:
+                # assume trajectory given as r, theta, phi
+                if len(trajectory) != 3:
+                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
+                r, theta, phi = trajectory
+                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+                  r * np.array([np.cos(phi) * np.sin(theta),
+                                np.sin(phi) * np.sin(theta),
+                                np.cos(theta)])
+            self.light_ray_solution[0]['traversal_box_fraction'] = \
+              vector_length(self.light_ray_solution[0]['start'], 
+                            self.light_ray_solution[0]['end'])
 
-        for q in range(len(self.light_ray_solution)):
-            if (q == len(self.light_ray_solution) - 1):
-                z_next = self.near_redshift
-            else:
-                z_next = self.light_ray_solution[q+1]['redshift']
+        # the normal way (random start positions and trajectories for each dataset)
+        else:
+            
+            # For box coherence, keep track of effective depth travelled.
+            box_fraction_used = 0.0
 
-            # Calculate fraction of box required for a depth of delta z
-            self.light_ray_solution[q]['traversal_box_fraction'] = \
-                self.cosmology.ComovingRadialDistance(\
-                z_next, self.light_ray_solution[q]['redshift']) * \
-                self.simulation.hubble_constant / \
-                self.simulation.box_size
+            for q in range(len(self.light_ray_solution)):
+                if (q == len(self.light_ray_solution) - 1):
+                    z_next = self.near_redshift
+                else:
+                    z_next = self.light_ray_solution[q+1]['redshift']
 
-            # Simple error check to make sure more than 100% of box depth
-            # is never required.
-            if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_ray_solution[q]['redshift'], z_next,
-                             self.light_ray_solution[q]['traversal_box_fraction']))
-                mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_ray_solution[q]['deltazMax'],
-                             self.light_ray_solution[q]['redshift']-z_next))
+                # Calculate fraction of box required for a depth of delta z
+                self.light_ray_solution[q]['traversal_box_fraction'] = \
+                    self.cosmology.ComovingRadialDistance(\
+                    z_next, self.light_ray_solution[q]['redshift']) * \
+                    self.simulation.hubble_constant / \
+                    self.simulation.box_size
 
-            # Get dataset axis and center.
-            # If using box coherence, only get start point and vector if
-            # enough of the box has been used,
-            # or if box_fraction_used will be greater than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used >
-                     self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                # Random start point
-                self.light_ray_solution[q]['start'] = np.random.random(3)
-                theta = np.pi * np.random.random()
-                phi = 2 * np.pi * np.random.random()
-                box_fraction_used = 0.0
-            else:
-                # Use end point of previous segment and same theta and phi.
-                self.light_ray_solution[q]['start'] = \
-                  self.light_ray_solution[q-1]['end'][:]
+                # Simple error check to make sure more than 100% of box depth
+                # is never required.
+                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+                                (self.light_ray_solution[q]['redshift'], z_next,
+                                 self.light_ray_solution[q]['traversal_box_fraction']))
+                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+                                (self.light_ray_solution[q]['deltazMax'],
+                                 self.light_ray_solution[q]['redshift']-z_next))
 
-            self.light_ray_solution[q]['end'] = \
-              self.light_ray_solution[q]['start'] + \
-                self.light_ray_solution[q]['traversal_box_fraction'] * \
-                np.array([np.cos(phi) * np.sin(theta),
-                          np.sin(phi) * np.sin(theta),
-                          np.cos(theta)])
-            box_fraction_used += \
-              self.light_ray_solution[q]['traversal_box_fraction']
+                # Get dataset axis and center.
+                # If using box coherence, only get start point and vector if
+                # enough of the box has been used,
+                # or if box_fraction_used will be greater than 1 after this slice.
+                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+                        (box_fraction_used >
+                         self.minimum_coherent_box_fraction) or \
+                        (box_fraction_used +
+                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    # Random start point
+                    self.light_ray_solution[q]['start'] = np.random.random(3)
+                    theta = np.pi * np.random.random()
+                    phi = 2 * np.pi * np.random.random()
+                    box_fraction_used = 0.0
+                else:
+                    # Use end point of previous segment and same theta and phi.
+                    self.light_ray_solution[q]['start'] = \
+                      self.light_ray_solution[q-1]['end'][:]
+
+                self.light_ray_solution[q]['end'] = \
+                  self.light_ray_solution[q]['start'] + \
+                    self.light_ray_solution[q]['traversal_box_fraction'] * \
+                    np.array([np.cos(phi) * np.sin(theta),
+                              np.sin(phi) * np.sin(theta),
+                              np.cos(theta)])
+                box_fraction_used += \
+                  self.light_ray_solution[q]['traversal_box_fraction']
 
         if filename is not None:
             self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None, fields=None,
+    def make_light_ray(self, seed=None,
+                       start_position=None, end_position=None,
+                       trajectory=None,
+                       fields=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=False,
                        get_nearest_halo=False,
@@ -197,6 +232,19 @@
         seed : int
             Seed for the random number generator.
             Default: None.
+        start_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the starting position of the ray.
+            Default: None.
+        end_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the ending position of the ray.
+            Default: None.
+        trajectory : list of floats
+            Used only if creating a light ray from a single dataset.
+            The (r, theta, phi) direction of the light ray.  Use either 
+            end_position or trajectory, not both.
+            Default: None.
         fields : list
             A list of fields for which to get data.
             Default: None.
@@ -313,7 +361,11 @@
             nearest_halo_fields = []
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+        self._calculate_light_ray_solution(seed=seed, 
+                                           start_position=start_position, 
+                                           end_position=end_position,
+                                           trajectory=trajectory,
+                                           filename=solution_filename)
 
         # Initialize data structures.
         self._data = {}
@@ -335,9 +387,18 @@
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
                                                        njobs=njobs, dynamic=dynamic):
-            mylog.info("Creating ray segment at z = %f." %
-                       my_segment['redshift'])
-            if my_segment['next'] is None:
+
+            # Load dataset for segment.
+            pf = load(my_segment['filename'])
+
+            if self.near_redshift == self.far_redshift:
+                h_vel = cm_per_km * pf.units['mpc'] * \
+                  vector_length(my_segment['start'], my_segment['end']) * \
+                  self.cosmology.HubbleConstantNow * \
+                  self.cosmology.ExpansionFactor(my_segment['redshift'])
+                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+            elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
                 next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Load dataset for segment.
-            pf = load(my_segment['filename'])
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
 

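A hypothetical single-dataset invocation enabled by this change (the import path, constructor arguments, and file names are all assumptions): make near_redshift equal to far_redshift, then give make_light_ray a start_position plus either an end_position or an (r, theta, phi) trajectory, not both:

from yt.analysis_modules.api import LightRay

lr = LightRay('enzo_simulation.par', 'Enzo',
              near_redshift=0.05, far_redshift=0.05)
lr.make_light_ray(start_position=[0.1, 0.1, 0.1],
                  end_position=[0.9, 0.9, 0.9],
                  solution_filename='ray_solution.txt',
                  data_filename='ray.h5')
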
diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectories

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,329 @@
+"""
+Particle trajectories
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectories object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    ________
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g. leaving the simulation domain or being actively
+    destroyed), the whole trajectory collection of which it is a set must end
+    at or before the particle's last timestep. This is a limitation we hope to
+    lift at some point in the future.     
+    """
+    def __init__(self, filenames, indices, fields=None) :
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None: fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first interpolate the field to the particle positions
+        # and then return the field. 
+
+        pf = self.pfs[0]
+        self.derived_field_list = pf.h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if pf.field_info[field].particle_type]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each dataset, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs:
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+                raise IndexError("Not all requested particle ids are "
+                                 "contained in this dataset!")
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = np.array(self.times)
+
+        # Now instantiate the requested fields 
+        for field in fields:
+            self._get_data(field)
+            
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+        if key == "particle_time":
+            return self.times
+        if key not in self.field_data:
+            self._get_data(key)
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        for field in fields:
+            if field not in self.field_data:
+                self._get_data(field)
+                
+    def _get_data(self, field):
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        if field not in self.field_data:
+            particles = np.empty((0))
+            step = int(0)
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+                if field in self.particle_fields:
+                    # This is easy... just get the particle fields
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = np.append(particles, pfield[sort])
+                else:
+                    # This is hard... must loop over grids
+                    pfield = np.zeros((self.num_indices))
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    for grid in particle_grids:
+                        cube = grid.retrieve_ghost_zones(1, [field])
+                        CICSample_3(x,y,z,pfield,
+                                    self.num_indices,
+                                    cube[field],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    np.float64(grid['dx']))
+                    particles = np.append(particles, pfield)
+                step += 1
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            raise IndexError("The particle index %d is not in the list!" % index)
+        fields = sorted(self.field_data.keys())
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = sorted(self.field_data.keys())
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in xrange(self.num_indices):
+            outlines = [first_str]
+            for it in xrange(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+            
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fields = sorted(self.field_data.keys())
+        fid.create_dataset("particle_indices", dtype=np.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        for field in fields:
+            fid.create_dataset(field, data=self[field])
+        fid.close()
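
The reshape/transpose at the end of _get_data above is worth spelling
out: field values are accumulated as one flat block of num_indices
values per timestep, then folded into per-particle rows. A minimal
sketch of the same pattern, with made-up sizes and values:

    import numpy as np

    num_steps, num_indices = 3, 2
    # One flat block of num_indices values is appended per timestep:
    particles = np.array([10, 11,    # step 0: particle A, particle B
                          20, 21,    # step 1
                          30, 31])   # step 2
    traj = particles.reshape(num_steps, num_indices).transpose()
    # traj[i, :] is now the full time series of particle i:
    # traj[0] -> [10, 20, 30], traj[1] -> [11, 21, 31]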

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('particle_trajectories', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config
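
For context, numpy.distutils only picks this module up when a parent
package's setup.py registers it as a subpackage. A minimal sketch of
such a parent configuration (this parent file is assumed here for
illustration and is not part of the diff):

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('analysis_modules', parent_package, top_path)
        config.add_subpackage("particle_trajectories")
        return config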

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/photon_simulator/api.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.analysis_modules.photon_simulator.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .photon_models import \
+     PhotonModel, \
+     ThermalPhotonModel
+
+from .photon_simulator import \
+     PhotonList, \
+     EventList
+
+from .spectral_models import \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 yt/analysis_modules/photon_simulator/photon_models.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -0,0 +1,205 @@
+"""
+Classes for specific photon models
+
+The algorithms used here are based on the method used by the
+PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
+developed by Veronica Biffi and Klaus Dolag. References for
+PHOX may be found at:
+
+Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545
+http://adsabs.harvard.edu/abs/2012MNRAS.420.3545B
+
+Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395
+http://adsabs.harvard.edu/abs/2013MNRAS.428.1395B
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import *
+from yt.utilities.physical_constants import \
+     mp, cm_per_km, K_per_keV, cm_per_mpc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system
+
+N_TBIN = 10000
+TMIN = 8.08e-2
+TMAX = 50.
+
+comm = communication_system.communicators[-1]
+
+class PhotonModel(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, data_source, parameters):
+        photons = {}
+        return photons
+
+class ThermalPhotonModel(PhotonModel):
+    r"""
+    Initialize a ThermalPhotonModel from a thermal spectrum. 
+    
+    Parameters
+    ----------
+
+    spectral_model : `SpectralModel`
+        A thermal spectral model instance, either of `XSpecThermalModel`
+        or `TableApecModel`. 
+    X_H : float, optional
+        The hydrogen mass fraction.
+    Zmet : float or string, optional
+        The metallicity. If a float, a constant metallicity is assumed
+        throughout. If a string, it is taken to be the name of the
+        metallicity field.
+    """
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+        self.X_H = X_H
+        self.Zmet = Zmet
+        self.spectral_model = spectral_model
+
+    def __call__(self, data_source, parameters):
+        
+        pf = data_source.pf
+
+        exp_time = parameters["FiducialExposureTime"]
+        area = parameters["FiducialArea"]
+        redshift = parameters["FiducialRedshift"]
+        D_A = parameters["FiducialAngularDiameterDistance"]*cm_per_mpc
+        dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3)
+                
+        vol_scale = pf.units["cm"]**(-3)/np.prod(pf.domain_width)
+        
+        num_cells = data_source["Temperature"].shape[0]
+        start_c = comm.rank*num_cells/comm.size
+        end_c = (comm.rank+1)*num_cells/comm.size
+        
+        kT = data_source["Temperature"][start_c:end_c].copy()/K_per_keV
+        vol = data_source["CellVolume"][start_c:end_c].copy()
+        dx = data_source["dx"][start_c:end_c].copy()
+        EM = (data_source["Density"][start_c:end_c].copy()/mp)**2
+        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+    
+        data_source.clear_data()
+    
+        x = data_source["x"][start_c:end_c].copy()
+        y = data_source["y"][start_c:end_c].copy()
+        z = data_source["z"][start_c:end_c].copy()
+    
+        data_source.clear_data()
+        
+        vx = data_source["x-velocity"][start_c:end_c].copy()
+        vy = data_source["y-velocity"][start_c:end_c].copy()
+        vz = data_source["z-velocity"][start_c:end_c].copy()
+    
+        if isinstance(self.Zmet, basestring):
+            metalZ = data_source[self.Zmet][start_c:end_c].copy()
+        else:
+            metalZ = self.Zmet*np.ones(EM.shape)
+        
+        data_source.clear_data()
+
+        idxs = np.argsort(kT)
+        dshape = idxs.shape
+
+        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
+        dkT = kT_bins[1]-kT_bins[0]
+        kT_idxs = np.digitize(kT[idxs], kT_bins)
+        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
+        bcounts = np.bincount(kT_idxs).astype("int")
+        bcounts = bcounts[bcounts > 0]
+        n = int(0)
+        bcell = []
+        ecell = []
+        for bcount in bcounts:
+            bcell.append(n)
+            ecell.append(n+bcount)
+            n += bcount
+        kT_idxs = np.unique(kT_idxs)
+        
+        self.spectral_model.prepare()
+        energy = self.spectral_model.ebins
+    
+        cell_em = EM[idxs]*vol_scale
+        cell_vol = vol[idxs]*vol_scale
+    
+        number_of_photons = np.zeros(dshape, dtype='uint64')
+        energies = []
+    
+        u = np.random.random(cell_em.shape)
+        
+        pbar = get_pbar("Generating Photons", dshape[0])
+
+        for i, ikT in enumerate(kT_idxs):
+
+            ncells = int(bcounts[i])
+            ibegin = bcell[i]
+            iend = ecell[i]
+            kT = kT_bins[ikT] + 0.5*dkT
+        
+            em_sum_c = cell_em[ibegin:iend].sum()
+            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+            
+            cspec, mspec = self.spectral_model.get_spectrum(kT)
+            cspec *= dist_fac*em_sum_c/vol_scale
+            mspec *= dist_fac*em_sum_m/vol_scale
+        
+            cumspec_c = np.cumsum(cspec)
+            counts_c = cumspec_c[:]/cumspec_c[-1]
+            counts_c = np.insert(counts_c, 0, 0.0)
+            tot_ph_c = cumspec_c[-1]*area*exp_time
+
+            cumspec_m = np.cumsum(mspec)
+            counts_m = cumspec_m[:]/cumspec_m[-1]
+            counts_m = np.insert(counts_m, 0, 0.0)
+            tot_ph_m = cumspec_m[-1]*area*exp_time
+        
+            for icell in xrange(ibegin, iend):
+            
+                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+            
+                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+            
+                cell_n = cell_n_c + cell_n_m
+
+                if cell_n > 0:
+                    number_of_photons[icell] = cell_n
+                    randvec_c = np.random.uniform(size=cell_n_c)
+                    randvec_c.sort()
+                    randvec_m = np.random.uniform(size=cell_n_m)
+                    randvec_m.sort()
+                    cell_e_c = np.interp(randvec_c, counts_c, energy)
+                    cell_e_m = np.interp(randvec_m, counts_m, energy)
+                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
+                
+                pbar.update(icell)
+
+        pbar.finish()
+            
+        active_cells = number_of_photons > 0
+        idxs = idxs[active_cells]
+        
+        photons = {}
+
+        src_ctr = parameters["center"]
+        
+        photons["x"] = (x[idxs]-src_ctr[0])*pf.units["kpc"]
+        photons["y"] = (y[idxs]-src_ctr[1])*pf.units["kpc"]
+        photons["z"] = (z[idxs]-src_ctr[2])*pf.units["kpc"]
+        photons["vx"] = vx[idxs]/cm_per_km
+        photons["vy"] = vy[idxs]/cm_per_km
+        photons["vz"] = vz[idxs]/cm_per_km
+        photons["dx"] = dx[idxs]*pf.units["kpc"]
+        photons["NumberOfPhotons"] = number_of_photons[active_cells]
+        photons["Energy"] = np.concatenate(energies)
+    
+        return photons
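
Two numerical tricks in ThermalPhotonModel.__call__ above deserve a
note: photon energies are drawn by inverting the cumulative spectrum
with np.interp, and fractional expected photon counts are rounded
stochastically so the mean count is preserved. A self-contained sketch
of both patterns, with toy numbers that are not from the module:

    import numpy as np

    # Inverse-CDF energy draw (cf. counts_c / cell_e_c above):
    spec = np.array([4.0, 2.0, 1.0, 1.0])         # toy 4-bin spectrum
    ebins = np.linspace(0.5, 2.5, len(spec) + 1)  # bin edges (keV)
    cdf = np.insert(np.cumsum(spec) / np.sum(spec), 0, 0.0)
    u = np.sort(np.random.uniform(size=6))
    energies = np.interp(u, cdf, ebins)           # 6 photon energies

    # Stochastic rounding (cf. the cell_n_c / cell_n_m lines above):
    # keep the integer part, promote the fraction with matching
    # probability, so the expected count is exact.
    cell_norm = 2.7
    v = np.random.random()
    cell_n = np.uint64(cell_norm) + np.uint64(np.modf(cell_norm)[0] >= v)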

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/7dff4f0e56b7/
Changeset:   7dff4f0e56b7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 22:05:01
Summary:     Removing NyxStaticOutput from mods
Affected #:  1 file

diff -r 814c19f73b48fd59e31ad9d9cf241e2fd12f2210 -r 7dff4f0e56b7805edd0f48d670fd794605ca0635 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -66,7 +66,7 @@
 
 # Boxlib stuff
 from yt.frontends.boxlib.api import \
-    BoxlibStaticOutput, NyxStaticOutput
+    BoxlibStaticOutput
 
 # Orion stuff
 from yt.frontends.boxlib.api import \


https://bitbucket.org/yt_analysis/yt/commits/eea2615f7e8a/
Changeset:   eea2615f7e8a
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-11-21 17:18:56
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #135)

Merge from most of the 2.6 development
Affected #:  53 files

diff -r bbb78058f4f1107c257cb6652b6b1feebb06c467 -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
 include distribute_setup.py README* CREDITS COPYING.txt CITATION
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE kdtree2-README
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r bbb78058f4f1107c257cb6652b6b1feebb06c467 -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there!  You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses.  It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there!  You've just downloaded yt, an analysis tool for scientific
+datasets, generated on a variety of data platforms.  It's written in 
+python and heavily leverages both NumPy and Matplotlib for fast arrays and 
+visualization, respectively.
 
 Full documentation and a user community can be found at:
 
 http://yt-project.org/
+
 http://yt-project.org/doc/
 
 If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
 doc/install_script.sh .  You will have to set the destination directory, and
 there are options available, but it should be straightforward.
 
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or 
+ways to help development, please visit our website.
 
 Enjoy!

diff -r bbb78058f4f1107c257cb6652b6b1feebb06c467 -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
 get_ytdata xray_emissivity.h5
 
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
@@ -918,6 +923,8 @@
 do_setup_py $SYMPY
 [ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
+( ${DEST_DIR}/bin/pip install jinja2 2>&1 ) 1>> ${LOG_FILE}
+
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then

diff -r bbb78058f4f1107c257cb6652b6b1feebb06c467 -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -86,6 +86,10 @@
     #Empty fit without any lines
     yFit = na.ones(len(fluxData))
 
+    #Force the first and last flux pixels to be 1 to prevent out-of-bounds access
+    fluxData[0]=1
+    fluxData[-1]=1
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -120,9 +124,10 @@
                     z,fitLim,minError*(b[2]-b[1]),speciesDict)
 
             #Check existence of partner lines if applicable
-            newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                    b, minError*(b[2]-b[1]),
-                    x0, xRes, speciesDict)
+            if len(speciesDict['wavelength']) != 1:
+                newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
+                        b, minError*(b[2]-b[1]),
+                        x0, xRes, speciesDict)
 
             #If flagged as a bad fit, species is lyman alpha,
             #   and it may be a saturated line, use special tools
@@ -548,6 +553,10 @@
         #Index of the redshifted wavelength
         indexRedWl = (redWl-x0)/xRes
 
+        #Check that the redshifted index is actually inside the flux array
+        if indexRedWl >= len(y):
+            return False
+
         #Check if surpasses minimum absorption bound
         if y[int(indexRedWl)]>fluxMin:
             return False
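
The two guards added above are both bounds protections: pinning the
endpoint fluxes to the continuum keeps detected complexes from running
off either end of the array, and the index check rejects redshifted
wavelengths that fall past the sampled range. In sketch form, with
illustrative arrays only:

    import numpy as np

    flux = np.array([0.4, 0.9, 1.0, 0.8, 0.3])
    flux[0] = flux[-1] = 1      # complexes must open and close in-array

    x0, xRes = 1216.0, 0.1      # illustrative wavelength grid origin/step
    redWl = 1216.9              # a redshifted line wavelength
    indexRedWl = (redWl - x0) / xRes
    in_range = indexRedWl < len(flux)   # guard before flux[int(indexRedWl)]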

diff -r bbb78058f4f1107c257cb6652b6b1feebb06c467 -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,16 @@
 from .radmc3d_export.api import \
     RadMC3DWriter
 
+from .particle_trajectories.api import \
+    ParticleTrajectories
+
+from .photon_simulator.api import \
+     PhotonList, \
+     EventList, \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel, \
+     PhotonModel, \
+     ThermalPhotonModel

diff -r bbb78058f4f1107c257cb6652b6b1feebb06c467 -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
         cosmology_splice = []
-
+ 
+        if near_redshift == far_redshift:
+            self.simulation.get_time_series(redshifts=[near_redshift])
+            cosmology_splice.append({'time': self.simulation[0].current_time,
+                                     'redshift': self.simulation[0].current_redshift,
+                                     'filename': os.path.join(self.simulation[0].fullpath,
+                                                              self.simulation[0].basename),
+                                     'next': None})
+            mylog.info("create_cosmology_splice: Using %s for z = %f." %
+                       (cosmology_splice[0]['filename'], near_redshift))
+            return cosmology_splice
+        
         # Use minimum number of datasets to go from z_i to z_f.
         if minimal:
 

diff -r bbb78058f4f1107c257cb6652b6b1feebb06c467 -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
     only_on_root, \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import \
+     speed_of_light_cgs, \
+     cm_per_km
 
 class LightRay(CosmologySplice):
     """
@@ -51,7 +54,9 @@
     near_redshift : float
         The near (lowest) redshift for the light ray.
     far_redshift : float
-        The far (highest) redshift for the light ray.
+        The far (highest) redshift for the light ray.  NOTE: in order 
+        to use only a single dataset in a light ray, set the 
+        near_redshift and far_redshift to be the same.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
@@ -111,65 +116,92 @@
                                        time_data=time_data,
                                        redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, filename=None):
+    def _calculate_light_ray_solution(self, seed=None, 
+                                      start_position=None, end_position=None,
+                                      trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
         np.random.seed(seed)
 
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
+        # If using only one dataset, set start and stop manually.
+        if start_position is not None:
+            if len(self.light_ray_solution) > 1:
+                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+            if not ((end_position is None) ^ (trajectory is None)):
+                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if end_position is not None:
+                self.light_ray_solution[0]['end'] = np.array(end_position)
+            else:
+                # assume trajectory given as r, theta, phi
+                if len(trajectory) != 3:
+                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
+                r, theta, phi = trajectory
+                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+                  r * np.array([np.cos(phi) * np.sin(theta),
+                                np.sin(phi) * np.sin(theta),
+                                np.cos(theta)])
+            self.light_ray_solution[0]['traversal_box_fraction'] = \
+              vector_length(self.light_ray_solution[0]['start'], 
+                            self.light_ray_solution[0]['end'])
 
-        for q in range(len(self.light_ray_solution)):
-            if (q == len(self.light_ray_solution) - 1):
-                z_next = self.near_redshift
-            else:
-                z_next = self.light_ray_solution[q+1]['redshift']
+        # the normal way (random start positions and trajectories for each dataset)
+        else:
+            
+            # For box coherence, keep track of effective depth travelled.
+            box_fraction_used = 0.0
 
-            # Calculate fraction of box required for a depth of delta z
-            self.light_ray_solution[q]['traversal_box_fraction'] = \
-                self.cosmology.ComovingRadialDistance(\
-                z_next, self.light_ray_solution[q]['redshift']) * \
-                self.simulation.hubble_constant / \
-                self.simulation.box_size
+            for q in range(len(self.light_ray_solution)):
+                if (q == len(self.light_ray_solution) - 1):
+                    z_next = self.near_redshift
+                else:
+                    z_next = self.light_ray_solution[q+1]['redshift']
 
-            # Simple error check to make sure more than 100% of box depth
-            # is never required.
-            if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_ray_solution[q]['redshift'], z_next,
-                             self.light_ray_solution[q]['traversal_box_fraction']))
-                mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_ray_solution[q]['deltazMax'],
-                             self.light_ray_solution[q]['redshift']-z_next))
+                # Calculate fraction of box required for a depth of delta z
+                self.light_ray_solution[q]['traversal_box_fraction'] = \
+                    self.cosmology.ComovingRadialDistance(\
+                    z_next, self.light_ray_solution[q]['redshift']) * \
+                    self.simulation.hubble_constant / \
+                    self.simulation.box_size
 
-            # Get dataset axis and center.
-            # If using box coherence, only get start point and vector if
-            # enough of the box has been used,
-            # or if box_fraction_used will be greater than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used >
-                     self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                # Random start point
-                self.light_ray_solution[q]['start'] = np.random.random(3)
-                theta = np.pi * np.random.random()
-                phi = 2 * np.pi * np.random.random()
-                box_fraction_used = 0.0
-            else:
-                # Use end point of previous segment and same theta and phi.
-                self.light_ray_solution[q]['start'] = \
-                  self.light_ray_solution[q-1]['end'][:]
+                # Simple error check to make sure more than 100% of box depth
+                # is never required.
+                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+                                (self.light_ray_solution[q]['redshift'], z_next,
+                                 self.light_ray_solution[q]['traversal_box_fraction']))
+                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+                                (self.light_ray_solution[q]['deltazMax'],
+                                 self.light_ray_solution[q]['redshift']-z_next))
 
-            self.light_ray_solution[q]['end'] = \
-              self.light_ray_solution[q]['start'] + \
-                self.light_ray_solution[q]['traversal_box_fraction'] * \
-                np.array([np.cos(phi) * np.sin(theta),
-                          np.sin(phi) * np.sin(theta),
-                          np.cos(theta)])
-            box_fraction_used += \
-              self.light_ray_solution[q]['traversal_box_fraction']
+                # Get dataset axis and center.
+                # If using box coherence, only get start point and vector if
+                # enough of the box has been used,
+                # or if box_fraction_used will be greater than 1 after this slice.
+                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+                        (box_fraction_used >
+                         self.minimum_coherent_box_fraction) or \
+                        (box_fraction_used +
+                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    # Random start point
+                    self.light_ray_solution[q]['start'] = np.random.random(3)
+                    theta = np.pi * np.random.random()
+                    phi = 2 * np.pi * np.random.random()
+                    box_fraction_used = 0.0
+                else:
+                    # Use end point of previous segment and same theta and phi.
+                    self.light_ray_solution[q]['start'] = \
+                      self.light_ray_solution[q-1]['end'][:]
+
+                self.light_ray_solution[q]['end'] = \
+                  self.light_ray_solution[q]['start'] + \
+                    self.light_ray_solution[q]['traversal_box_fraction'] * \
+                    np.array([np.cos(phi) * np.sin(theta),
+                              np.sin(phi) * np.sin(theta),
+                              np.cos(theta)])
+                box_fraction_used += \
+                  self.light_ray_solution[q]['traversal_box_fraction']
 
         if filename is not None:
             self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None, fields=None,
+    def make_light_ray(self, seed=None,
+                       start_position=None, end_position=None,
+                       trajectory=None,
+                       fields=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=False,
                        get_nearest_halo=False,
@@ -197,6 +232,19 @@
         seed : int
             Seed for the random number generator.
             Default: None.
+        start_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the starting position of the ray.
+            Default: None.
+        end_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the ending position of the ray.
+            Default: None.
+        trajectory : list of floats
+            Used only if creating a light ray from a single dataset.
+            The (r, theta, phi) direction of the light ray.  Use either
+            end_position or trajectory, not both.
+            Default: None.
         fields : list
             A list of fields for which to get data.
             Default: None.
@@ -313,7 +361,11 @@
             nearest_halo_fields = []
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+        self._calculate_light_ray_solution(seed=seed, 
+                                           start_position=start_position, 
+                                           end_position=end_position,
+                                           trajectory=trajectory,
+                                           filename=solution_filename)
 
         # Initialize data structures.
         self._data = {}
@@ -335,9 +387,18 @@
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
                                                        njobs=njobs, dynamic=dynamic):
-            mylog.info("Creating ray segment at z = %f." %
-                       my_segment['redshift'])
-            if my_segment['next'] is None:
+
+            # Load dataset for segment.
+            pf = load(my_segment['filename'])
+
+            if self.near_redshift == self.far_redshift:
+                h_vel = cm_per_km * pf.units['mpc'] * \
+                  vector_length(my_segment['start'], my_segment['end']) * \
+                  self.cosmology.HubbleConstantNow * \
+                  self.cosmology.ExpansionFactor(my_segment['redshift'])
+                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+            elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
                 next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Load dataset for segment.
-            pf = load(my_segment['filename'])
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
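
For the single-dataset rays added above, both formulas are easy to
check in isolation. A standalone sketch of the (r, theta, phi)
trajectory conversion and the Doppler redshift interval, with all
values illustrative:

    import numpy as np

    # trajectory given as (r, theta, phi) -> Cartesian end point:
    start = np.array([0.25, 0.25, 0.25])
    r, theta, phi = 0.5, np.pi / 3, np.pi / 4
    end = start + r * np.array([np.cos(phi) * np.sin(theta),
                                np.sin(phi) * np.sin(theta),
                                np.cos(theta)])

    # redshift interval from the relativistic Doppler shift of the
    # Hubble-flow velocity h_vel across the segment:
    c = 2.99792458e10        # speed of light in cm/s
    h_vel = 0.01 * c         # illustrative segment velocity
    next_redshift = np.sqrt((1. + h_vel / c) / (1. - h_vel / c)) - 1.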
 

diff -r bbb78058f4f1107c257cb6652b6b1feebb06c467 -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .particle_trajectories import ParticleTrajectories

+        idxs = idxs[active_cells]
+        
+        photons = {}
+
+        src_ctr = parameters["center"]
+        
+        photons["x"] = (x[idxs]-src_ctr[0])*pf.units["kpc"]
+        photons["y"] = (y[idxs]-src_ctr[1])*pf.units["kpc"]
+        photons["z"] = (z[idxs]-src_ctr[2])*pf.units["kpc"]
+        photons["vx"] = vx[idxs]/cm_per_km
+        photons["vy"] = vy[idxs]/cm_per_km
+        photons["vz"] = vz[idxs]/cm_per_km
+        photons["dx"] = dx[idxs]*pf.units["kpc"]
+        photons["NumberOfPhotons"] = number_of_photons[active_cells]
+        photons["Energy"] = np.concatenate(energies)
+    
+        return photons

This diff is so big that we needed to truncate the remainder.
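
For orientation, here is a minimal sketch of how the new ThermalPhotonModel is
meant to be driven, reconstructed from the diff above. The dataset name, the
APEC table path, and the TableApecModel argument list are assumptions rather
than things this changeset shows; the parameter keys are exactly the ones
__call__ reads.

    # Sketch only: dataset, table path, and TableApecModel signature are assumed.
    from yt.mods import load
    from yt.analysis_modules.photon_simulator.photon_models import \
         ThermalPhotonModel
    from yt.analysis_modules.photon_simulator.spectral_models import \
         TableApecModel

    pf = load("plt_cnt_0100")                    # hypothetical dataset
    sp = pf.h.sphere("c", (500.0, "kpc"))

    apec_model = TableApecModel("/path/to/atomdb", 0.01, 10.0, 2000)
    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3)

    parameters = {"FiducialExposureTime": 1.0e5,              # seconds
                  "FiducialArea": 6000.0,                     # cm^2
                  "FiducialRedshift": 0.05,
                  "FiducialAngularDiameterDistance": 200.0,   # Mpc
                  "center": sp.center}

    photons = thermal_model(sp, parameters)
    # photons carries "x", "y", "z", "vx", "vy", "vz", "dx",
    # "NumberOfPhotons" and "Energy", as assembled at the end of __call__.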

https://bitbucket.org/yt_analysis/yt/commits/0405d6eff17e/
Changeset:   0405d6eff17e
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-11-21 06:16:34
Summary:     Adding a new progressbar that is 'notebook-aware'
Affected #:  6 files

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 0405d6eff17e48f919309a5c5653ee9c5568dd63 yt/extern/progressbar.py
--- a/yt/extern/progressbar.py
+++ /dev/null
@@ -1,368 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-1 -*-
-#
-# progressbar  - Text progressbar library for python.
-# Copyright (c) 2005 Nilton Volpato
-# 
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-# 
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-# 
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-
-"""Text progressbar library for python.
-
-This library provides a text mode progressbar. This is tipically used
-to display the progress of a long running operation, providing a
-visual clue that processing is underway.
-
-The ProgressBar class manages the progress, and the format of the line
-is given by a number of widgets. A widget is an object that may
-display diferently depending on the state of the progress. There are
-three types of widget:
-- a string, which always shows itself;
-- a ProgressBarWidget, which may return a diferent value every time
-it's update method is called; and
-- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
-expands to fill the remaining width of the line.
-
-The progressbar module is very easy to use, yet very powerful. And
-automatically supports features like auto-resizing when available.
-"""
-
-__author__ = "Nilton Volpato"
-__author_email__ = "first-name dot last-name @ gmail.com"
-__date__ = "2006-05-07"
-__version__ = "2.2"
-
-# Changelog
-#
-# 2006-05-07: v2.2 fixed bug in windows
-# 2005-12-04: v2.1 autodetect terminal width, added start method
-# 2005-12-04: v2.0 everything is now a widget (wow!)
-# 2005-12-03: v1.0 rewrite using widgets
-# 2005-06-02: v0.5 rewrite
-# 2004-??-??: v0.1 first version
-
-
-import sys, time
-from array import array
-try:
-    from fcntl import ioctl
-    import termios
-except ImportError:
-    pass
-import signal
-
-class ProgressBarWidget(object):
-    """This is an element of ProgressBar formatting.
-
-    The ProgressBar object will call it's update value when an update
-    is needed. It's size may change between call, but the results will
-    not be good if the size changes drastically and repeatedly.
-    """
-    def update(self, pbar):
-        """Returns the string representing the widget.
-
-        The parameter pbar is a reference to the calling ProgressBar,
-        where one can access attributes of the class for knowing how
-        the update must be made.
-
-        At least this function must be overriden."""
-        pass
-
-class ProgressBarWidgetHFill(object):
-    """This is a variable width element of ProgressBar formatting.
-
-    The ProgressBar object will call it's update value, informing the
-    width this object must the made. This is like TeX \\hfill, it will
-    expand to fill the line. You can use more than one in the same
-    line, and they will all have the same width, and together will
-    fill the line.
-    """
-    def update(self, pbar, width):
-        """Returns the string representing the widget.
-
-        The parameter pbar is a reference to the calling ProgressBar,
-        where one can access attributes of the class for knowing how
-        the update must be made. The parameter width is the total
-        horizontal width the widget must have.
-
-        At least this function must be overriden."""
-        pass
-
-
-class ETA(ProgressBarWidget):
-    "Widget for the Estimated Time of Arrival"
-    def format_time(self, seconds):
-        return time.strftime('%H:%M:%S', time.gmtime(seconds))
-    def update(self, pbar):
-        if pbar.currval == 0:
-            return 'ETA:  --:--:--'
-        elif pbar.finished:
-            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
-        else:
-            elapsed = pbar.seconds_elapsed
-            eta = elapsed * pbar.maxval / pbar.currval - elapsed
-            return 'ETA:  %s' % self.format_time(eta)
-
-class FileTransferSpeed(ProgressBarWidget):
-    "Widget for showing the transfer speed (useful for file transfers)."
-    def __init__(self):
-        self.fmt = '%6.2f %s'
-        self.units = ['B','K','M','G','T','P']
-    def update(self, pbar):
-        if pbar.seconds_elapsed < 2e-6:#== 0:
-            bps = 0.0
-        else:
-            bps = float(pbar.currval) / pbar.seconds_elapsed
-        spd = bps
-        for u in self.units:
-            if spd < 1000:
-                break
-            spd /= 1000
-        return self.fmt % (spd, u+'/s')
-
-class RotatingMarker(ProgressBarWidget):
-    "A rotating marker for filling the bar of progress."
-    def __init__(self, markers='|/-\\'):
-        self.markers = markers
-        self.curmark = -1
-    def update(self, pbar):
-        if pbar.finished:
-            return self.markers[0]
-        self.curmark = (self.curmark + 1)%len(self.markers)
-        return self.markers[self.curmark]
-
-class Percentage(ProgressBarWidget):
-    "Just the percentage done."
-    def update(self, pbar):
-        return '%3d%%' % pbar.percentage()
-
-class Bar(ProgressBarWidgetHFill):
-    "The bar of progress. It will strech to fill the line."
-    def __init__(self, marker='#', left='|', right='|'):
-        self.marker = marker
-        self.left = left
-        self.right = right
-    def _format_marker(self, pbar):
-        if isinstance(self.marker, (str, unicode)):
-            return self.marker
-        else:
-            return self.marker.update(pbar)
-    def update(self, pbar, width):
-        percent = pbar.percentage()
-        cwidth = width - len(self.left) - len(self.right)
-        marked_width = int(percent * cwidth / 100)
-        m = self._format_marker(pbar)
-        bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
-        return bar
-
-class ReverseBar(Bar):
-    "The reverse bar of progress, or bar of regress. :)"
-    def update(self, pbar, width):
-        percent = pbar.percentage()
-        cwidth = width - len(self.left) - len(self.right)
-        marked_width = int(percent * cwidth / 100)
-        m = self._format_marker(pbar)
-        bar = (self.left + (m*marked_width).rjust(cwidth) + self.right)
-        return bar
-
-default_widgets = [Percentage(), ' ', Bar()]
-class ProgressBar(object):
-    """This is the ProgressBar class, it updates and prints the bar.
-
-    The term_width parameter may be an integer. Or None, in which case
-    it will try to guess it, if it fails it will default to 80 columns.
-
-    The simple use is like this:
-    >>> pbar = ProgressBar().start()
-    >>> for i in xrange(100):
-    ...    # do something
-    ...    pbar.update(i+1)
-    ...
-    >>> pbar.finish()
-
-    But anything you want to do is possible (well, almost anything).
-    You can supply different widgets of any type in any order. And you
-    can even write your own widgets! There are many widgets already
-    shipped and you should experiment with them.
-
-    When implementing a widget update method you may access any
-    attribute or function of the ProgressBar object calling the
-    widget's update method. The most important attributes you would
-    like to access are:
-    - currval: current value of the progress, 0 <= currval <= maxval
-    - maxval: maximum (and final) value of the progress
-    - finished: True if the bar is have finished (reached 100%), False o/w
-    - start_time: first time update() method of ProgressBar was called
-    - seconds_elapsed: seconds elapsed since start_time
-    - percentage(): percentage of the progress (this is a method)
-    """
-    def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
-                 fd=sys.stderr):
-        assert maxval > 0
-        self.maxval = maxval
-        self.widgets = widgets
-        self.fd = fd
-        self.signal_set = False
-        if term_width is None:
-            try:
-                self.handle_resize(None,None)
-                signal.signal(signal.SIGWINCH, self.handle_resize)
-                self.signal_set = True
-            except:
-                self.term_width = 79
-        else:
-            self.term_width = term_width
-
-        self.currval = 0
-        self.finished = False
-        self.prev_percentage = -1
-        self.start_time = None
-        self.seconds_elapsed = 0
-
-    def handle_resize(self, signum, frame):
-        h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
-        self.term_width = w
-
-    def percentage(self):
-        "Returns the percentage of the progress."
-        return self.currval*100.0 / self.maxval
-
-    def _format_widgets(self):
-        r = []
-        hfill_inds = []
-        num_hfill = 0
-        currwidth = 0
-        for i, w in enumerate(self.widgets):
-            if isinstance(w, ProgressBarWidgetHFill):
-                r.append(w)
-                hfill_inds.append(i)
-                num_hfill += 1
-            elif isinstance(w, (str, unicode)):
-                r.append(w)
-                currwidth += len(w)
-            else:
-                weval = w.update(self)
-                currwidth += len(weval)
-                r.append(weval)
-        for iw in hfill_inds:
-            r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
-        return r
-
-    def _format_line(self):
-        return ''.join(self._format_widgets()).ljust(self.term_width)
-
-    def _need_update(self):
-        return int(self.percentage()) != int(self.prev_percentage)
-
-    def update(self, value):
-        "Updates the progress bar to a new value."
-        assert 0 <= value <= self.maxval
-        self.currval = value
-        if not self._need_update() or self.finished:
-            return
-        if not self.start_time:
-            self.start_time = time.time()
-        self.seconds_elapsed = time.time() - self.start_time
-        self.prev_percentage = self.percentage()
-        if value != self.maxval:
-            self.fd.write(self._format_line() + '\r')
-        else:
-            self.finished = True
-            self.fd.write(self._format_line() + '\n')
-
-    def start(self):
-        """Start measuring time, and prints the bar at 0%.
-
-        It returns self so you can use it like this:
-        >>> pbar = ProgressBar().start()
-        >>> for i in xrange(100):
-        ...    # do something
-        ...    pbar.update(i+1)
-        ...
-        >>> pbar.finish()
-        """
-        self.update(0)
-        return self
-
-    def finish(self):
-        """Used to tell the progress is finished."""
-        self.update(self.maxval)
-        if self.signal_set:
-            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
-        
-
-
-
-
-
-if __name__=='__main__':
-    import os
-
-    def example1():
-        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
-                   ' ', ETA(), ' ', FileTransferSpeed()]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
-        for i in range(1000000):
-            # do something
-            pbar.update(10*i+1)
-        pbar.finish()
-        print
-
-    def example2():
-        class CrazyFileTransferSpeed(FileTransferSpeed):
-            "It's bigger between 45 and 80 percent"
-            def update(self, pbar):
-                if 45 < pbar.percentage() < 80:
-                    return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
-                else:
-                    return FileTransferSpeed.update(self,pbar)
-
-        widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000)
-        # maybe do something
-        pbar.start()
-        for i in range(2000000):
-            # do something
-            pbar.update(5*i+1)
-        pbar.finish()
-        print
-
-    def example3():
-        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
-        for i in range(1000000):
-            # do something
-            pbar.update(10*i+1)
-        pbar.finish()
-        print
-
-    def example4():
-        widgets = ['Test: ', Percentage(), ' ',
-                   Bar(marker='0',left='[',right=']'),
-                   ' ', ETA(), ' ', FileTransferSpeed()]
-        pbar = ProgressBar(widgets=widgets, maxval=500)
-        pbar.start()
-        for i in range(100,500+1,50):
-            time.sleep(0.2)
-            pbar.update(i)
-        pbar.finish()
-        print
-
-
-    example1()
-    example2()
-    example3()
-    example4()
-

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 0405d6eff17e48f919309a5c5653ee9c5568dd63 yt/extern/progressbar/__init__.py
--- /dev/null
+++ b/yt/extern/progressbar/__init__.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# progressbar  - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""Text progress bar library for Python.
+
+A text progress bar is typically used to display the progress of a long
+running operation, providing a visual cue that processing is underway.
+
+The ProgressBar class manages the current progress, and the format of the line
+is given by a number of widgets. A widget is an object that may display
+differently depending on the state of the progress bar. There are three types
+of widgets:
+ - a string, which always shows itself
+
+ - a ProgressBarWidget, which may return a different value every time its
+   update method is called
+
+ - a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
+   expands to fill the remaining width of the line.
+
+The progressbar module is very easy to use, yet very powerful. It will also
+automatically enable features like auto-resizing when the system supports it.
+"""
+
+__author__ = 'Nilton Volpato'
+__author_email__ = 'first-name dot last-name @ gmail.com'
+__date__ = '2011-05-14'
+__version__ = '2.3'
+
+from compat import *
+from widgets import *
+from progressbar import *

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 0405d6eff17e48f919309a5c5653ee9c5568dd63 yt/extern/progressbar/compat.py
--- /dev/null
+++ b/yt/extern/progressbar/compat.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# progressbar  - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""Compatibility methods and classes for the progressbar module."""
+
+
+# Python 3.x (and backports) use a modified iterator syntax
+# This will allow 2.x to behave with 3.x iterators
+try:
+  next
+except NameError:
+    def next(iter):
+        try:
+            # Try new style iterators
+            return iter.__next__()
+        except AttributeError:
+            # Fallback in case of a "native" iterator
+            return iter.next()
+
+
+# Python < 2.5 does not have "any"
+try:
+  any
+except NameError:
+   def any(iterator):
+      for item in iterator:
+         if item: return True
+      return False
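
Neither shim fires on a modern interpreter; the try blocks simply find the
builtins. For what the next() fallback does when it is needed, a standalone
sketch (the _next name is local to this example, not part of yt):

    # Mirrors the fallback above without shadowing the builtin.
    def _next(it):
        try:
            return it.__next__()    # new-style (3.x) iterators
        except AttributeError:
            return it.next()        # old-style (2.x) iterators

    print(_next(iter([1, 2, 3])))   # -> 1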

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 0405d6eff17e48f919309a5c5653ee9c5568dd63 yt/extern/progressbar/progressbar.py
--- /dev/null
+++ b/yt/extern/progressbar/progressbar.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# progressbar  - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""Main ProgressBar class."""
+
+from __future__ import division
+
+import math
+import os
+import signal
+import sys
+import time
+import uuid
+
+try:
+    from fcntl import ioctl
+    from array import array
+    import termios
+except ImportError:
+    pass
+
+import widgets
+
+# Test to see if we are in an IPython session.
+ipython = None
+for key in ['KernelApp','IPKernelApp']:
+  try:
+    ipython = get_ipython().config[key]['parent_appname']
+  except (NameError, KeyError):
+    pass
+
+ipython_notebook_css = """
+td.pb_widget {
+    width: auto;
+}
+td.pb_widget_fill {
+    width: 100%;
+}
+table.pb {
+    font-family: monospace;
+    border: 0;
+    margin: 0;
+}
+table.pb tr { border: 0; }
+table.pb td {
+    white-space: nowrap;
+    border: 0;
+}
+div.pb {
+    border: 1px solid #ddd;
+    border-radius: 3px;
+}
+div.pb_bar {
+    height: 1.5em;
+}
+""".replace('\n', ' ')
+
+class UnknownLength: pass
+
+
+class ProgressBar(object):
+    """The ProgressBar class which updates and prints the bar.
+
+    A common way of using it is like:
+    >>> pbar = ProgressBar().start()
+    >>> for i in range(100):
+    ...    # do something
+    ...    pbar.update(i+1)
+    ...
+    >>> pbar.finish()
+
+    You can also use a ProgressBar as an iterator:
+    >>> progress = ProgressBar()
+    >>> for i in progress(some_iterable):
+    ...    # do something
+    ...
+
+    Since the progress bar is incredibly customizable you can specify
+    different widgets of any type in any order. You can even write your own
+    widgets! However, since there are already a good number of widgets you
+    should probably play around with them before moving on to create your own
+    widgets.
+
+    The term_width parameter represents the current terminal width. If the
+    parameter is set to an integer then the progress bar will use that;
+    otherwise it will attempt to determine the terminal width, falling back to
+    80 columns if the width cannot be determined.
+
+    When implementing a widget's update method you are passed a reference to
+    the current progress bar. As a result, you have access to the
+    ProgressBar's methods and attributes. Although there is nothing preventing
+    you from changing the ProgressBar you should treat it as read only.
+
+    Useful methods and attributes include (Public API):
+     - currval: current progress (0 <= currval <= maxval)
+     - maxval: maximum (and final) value
+     - finished: True if the bar has finished (reached 100%)
+     - start_time: the time when start() method of ProgressBar was called
+     - seconds_elapsed: seconds elapsed between start_time and the last
+                        call to update
+     - percentage(): progress in percent [0..100]
+    """
+
+    __slots__ = ('currval', 'fd', 'finished', 'last_update_time',
+                 'left_justify', 'maxval', 'next_update', 'num_intervals',
+                 'poll', 'seconds_elapsed', 'signal_set', 'start_time',
+                 'term_width', 'update_interval', 'widgets', '_time_sensitive',
+                 '__iterable', 'attr', 'html_written', 'uuid')
+
+    _DEFAULT_MAXVAL = 100
+    _DEFAULT_TERMSIZE = 80
+    _DEFAULT_WIDGETS = [widgets.Percentage, widgets.Bar]
+
+    def __init__(self, maxval=None, widgets=None, term_width=None, poll=1,
+                 left_justify=True, fd=sys.stdout, attr={}):
+        """Initializes a progress bar with sane defaults."""
+
+        # Don't share a reference with any other progress bars
+        if widgets is None:
+            widgets = [widget() for widget in self._DEFAULT_WIDGETS]
+
+        self.maxval = maxval
+        self.widgets = widgets
+        self.fd = fd
+        self.left_justify = left_justify
+
+        self.signal_set = False
+        if term_width is not None:
+            self.term_width = term_width
+        else:
+            try:
+                self._handle_resize()
+                signal.signal(signal.SIGWINCH, self._handle_resize)
+                self.signal_set = True
+            except (SystemExit, KeyboardInterrupt): raise
+            except:
+                self.term_width = self._env_size()
+
+        self.__iterable = None
+        self._update_widgets()
+        self.currval = 0
+        self.finished = False
+        self.last_update_time = None
+        self.poll = poll
+        self.seconds_elapsed = 0
+        self.start_time = None
+        self.update_interval = 1
+        self.attr = attr
+
+        # Set flag so we only write out the HTML once,
+        # then update with javascript
+        self.html_written = False
+
+        self.uuid = str(uuid.uuid4())
+
+        # Install our CSS if we are in an IPython notebook
+        if ipython == 'ipython-notebook':
+            from IPython.display import Javascript, display
+            display(Javascript('//%s\n$("head").append("<style>%s</style>")' %
+                               (self.uuid,ipython_notebook_css)))
+            
+            # Also add a function that removes progressbar output from the cells
+            js = '''
+                  // %s -- used to remove this code blob in the end
+                  IPython.OutputArea.prototype.cleanProgressBar = function(uuids) {
+                      // filter by uuid-strings 
+                      var myfilter = function(output) { 
+                          var nuids = uuids.length;
+                          for (var i=0; i<nuids; i++) {
+                              if (output.hasOwnProperty('html')) {
+                                  if (output.html.indexOf(uuids[i]) != -1) {
+                                      return false;
+                                  }
+                              }
+                              if (output.hasOwnProperty('javascript')) {
+                                  if (output.javascript.indexOf(uuids[i]) != -1) {
+                                      return false;
+                                  }
+                              }
+                          }
+                          // keep all others
+                          return true;
+                      };
+
+                      // Filter the outputs
+                      this.outputs = this.outputs.filter(myfilter);
+                };
+                ''' % self.uuid
+            display(Javascript(js))
+
+    def __call__(self, iterable):
+        """Use a ProgressBar to iterate through an iterable."""
+
+        try:
+            self.maxval = len(iterable)
+        except:
+            if self.maxval is None:
+                self.maxval = UnknownLength
+
+        self.__iterable = iter(iterable)
+        return self
+
+
+    def __iter__(self):
+        return self
+
+
+    def __next__(self):
+        try:
+            value = next(self.__iterable)
+            if self.start_time is None: self.start()
+            else: self.update(self.currval + 1)
+            return value
+        except StopIteration:
+            self.finish()
+            raise
+
+
+    # Create an alias so that Python 2.x won't complain about not being
+    # an iterator.
+    next = __next__
+
+
+    def _env_size(self):
+        """Tries to find the term_width from the environment."""
+
+        return int(os.environ.get('COLUMNS', self._DEFAULT_TERMSIZE)) - 1
+
+
+    def _handle_resize(self, signum=None, frame=None):
+        """Tries to catch resize signals sent from the terminal."""
+
+        h, w = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\0' * 8))[:2]
+        self.term_width = w
+
+
+    def percentage(self):
+        """Returns the progress as a percentage."""
+        return self.currval * 100.0 / self.maxval
+
+    percent = property(percentage)
+
+
+    def _format_widgets(self):
+        result = []
+        expanding = []
+        width = self.term_width
+
+        for index, widget in enumerate(self.widgets):
+            if isinstance(widget, widgets.WidgetHFill):
+                result.append(widget)
+                expanding.insert(0, index)
+            else:
+                widget = widgets.format_updatable(widget, self)
+                result.append(widget)
+                width -= len(widget)
+
+        count = len(expanding)
+        while count:
+            portion = max(int(math.ceil(width * 1. / count)), 0)
+            index = expanding.pop()
+            count -= 1
+
+            widget = result[index].update(self, portion)
+            width -= len(widget)
+            result[index] = widget
+
+        return result
+
+
+    def _format_line(self):
+        """Joins the widgets and justifies the line."""
+
+        widgets = ''.join(self._format_widgets())
+
+        if self.left_justify: return widgets.ljust(self.term_width)
+        else: return widgets.rjust(self.term_width)
+
+
+    def _format_html(self):
+      html = '<div class="pb" id="%s"><table class="pb ui-widget"><tr>\n' % self.uuid
+      for widget in self.widgets:
+        if isinstance(widget, widgets.WidgetHFill):
+          td_class = 'pb_widget_fill'
+        else:
+          td_class = 'pb_widget'
+
+        html += ('<td class="%s">' % td_class) + \
+                widgets.format_updatable_html(widget, self) + \
+                '</td>\n'
+      html += '</tr></table></div>'
+      return html
+
+
+    def _need_update(self):
+        """Returns whether the ProgressBar should redraw the line."""
+        if self.currval >= self.next_update or self.finished: return True
+
+        delta = time.time() - self.last_update_time
+        return self._time_sensitive and delta > self.poll
+
+
+    def _update_widgets(self):
+        """Checks all widgets for the time sensitive bit."""
+
+        self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
+                                    for w in self.widgets)
+
+
+    def update(self, value=None, attr={}):
+        """Updates the ProgressBar to a new value."""
+
+        if value is not None and value is not UnknownLength:
+            if (self.maxval is not UnknownLength
+                and not 0 <= value <= self.maxval):
+
+                raise ValueError('Value out of range')
+
+            self.currval = value
+
+        self.attr.update(attr)
+
+        if not self._need_update(): return
+        if self.start_time is None:
+            raise RuntimeError('You must call "start" before calling "update"')
+
+        now = time.time()
+        self.seconds_elapsed = now - self.start_time
+        self.next_update = self.currval + self.update_interval
+
+        if ipython == 'ipython-notebook':
+            if not self.html_written:
+                # We have yet to display the HTML, do that first
+                from IPython.display import HTML, display
+                display(HTML(self._format_html()))
+                self.html_written = True
+            else:
+                # The HTML has been written once, now update with JS
+                from IPython.display import Javascript, display
+                for widget in self.widgets:
+                    js = widgets.updatable_js(widget, self)
+                    if js:
+                        display(Javascript(js))
+        else:
+            self.fd.write('\r' + self._format_line())
+            self.fd.flush()
+
+        self.last_update_time = now
+
+
+    def start(self):
+        """Starts measuring time, and prints the bar at 0%.
+
+        It returns self so you can use it like this:
+        >>> pbar = ProgressBar().start()
+        >>> for i in range(100):
+        ...    # do something
+        ...    pbar.update(i+1)
+        ...
+        >>> pbar.finish()
+        """
+
+        if self.maxval is None:
+            self.maxval = self._DEFAULT_MAXVAL
+
+        self.num_intervals = max(100, self.term_width)
+        self.next_update = 0
+
+        if self.maxval is not UnknownLength:
+            if self.maxval < 0: raise ValueError('Value out of range')
+            self.update_interval = self.maxval / self.num_intervals
+
+
+        self.start_time = self.last_update_time = time.time()
+        self.html_written = False
+        self.finished = False
+        self.update(0)
+
+        return self
+
+
+    def finish(self):
+        """Puts the ProgressBar bar in the finished state."""
+
+        self.finished = True
+        self.update(self.maxval)
+        self.start_time = None
+
+        # Clean-up in the notebook differs from a regular terminal
+        if not ipython == 'ipython-notebook':
+            self.fd.write('\n')
+        else:
+            from IPython.display import Javascript, display
+            # First delete the node that held the progress bar from the page
+            js = """var element = document.getElementById('%s');
+                    element.parentNode.removeChild(element);""" % self.uuid
+            display(Javascript(js))
+
+            # Then also remove its trace from the cell output (so it doesn't get
+            # stored with the notebook). This needs to be done for all widgets as
+            # well as for progressBar
+            uuids = [str(self.uuid)]
+            uuids += [w.uuid for w in self.widgets if isinstance(w, widgets.Widget)]
+            display(Javascript('this.cleanProgressBar(%s)' % uuids))
+
+        if self.signal_set:
+            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
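
Two usage patterns fall out of the class above: the iterator form from the
docstring, and the explicit form, which can now also ferry arbitrary values to
widgets through the attr dictionary. A short sketch, assuming the package-level
imports set up in __init__.py above; the 'loss' key is a placeholder and
Attribute is defined in widgets.py below:

    import time
    from yt.extern.progressbar import ProgressBar, Percentage, Bar, Attribute

    # Iterator form: maxval is taken from len() of the wrapped iterable.
    for item in ProgressBar()(range(50)):
        time.sleep(0.01)

    # Explicit form, feeding the Attribute widget via the attr dictionary.
    widgets = [Percentage(), ' ', Bar(), ' ',
               Attribute('loss', format='loss: %s')]
    pbar = ProgressBar(maxval=50, widgets=widgets).start()
    for i in range(50):
        pbar.update(i + 1, attr={'loss': 1.0 / (i + 1)})
    pbar.finish()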

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 0405d6eff17e48f919309a5c5653ee9c5568dd63 yt/extern/progressbar/widgets.py
--- /dev/null
+++ b/yt/extern/progressbar/widgets.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# progressbar  - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""Default ProgressBar widgets."""
+
+from __future__ import division
+
+import datetime
+import math
+import uuid
+
+try:
+    from abc import ABCMeta, abstractmethod
+except ImportError:
+    AbstractWidget = object
+    abstractmethod = lambda fn: fn
+else:
+    AbstractWidget = ABCMeta('AbstractWidget', (object,), {})
+
+
+def format_updatable(updatable, pbar):
+    if hasattr(updatable, 'update'): return updatable.update(pbar)
+    else: return updatable
+
+def format_updatable_html(updatable, pbar):
+    if hasattr(updatable, 'update_html'): return updatable.update_html(pbar)
+    else: return updatable
+
+def updatable_js(updatable, pbar):
+    if hasattr(updatable, 'update_js'): return updatable.update_js(pbar)
+    else: return None
+
+
+class Widget(AbstractWidget):
+    """The base class for all widgets.
+
+    The ProgressBar will call the widget's update value when the widget should
+    be updated. The widget's size may change between calls, but the widget may
+    display incorrectly if the size changes drastically and repeatedly.
+
+    The boolean TIME_SENSITIVE informs the ProgressBar that it should be
+    updated more often because it is time sensitive.
+    """
+
+    TIME_SENSITIVE = False
+    __slots__ = ()
+    uuid = None
+
+    @abstractmethod
+    def update(self, pbar):
+        """Updates the widget.
+
+        pbar - a reference to the calling ProgressBar
+        """
+
+    def update_html(self, pbar):
+        if self.uuid is None:
+            self.uuid = str(uuid.uuid4())
+        return '<div id="%s">%s</div>' % (self.uuid, self.update(pbar))
+
+    def update_js(self, pbar):
+        if self.uuid is None:
+            self.uuid = str(uuid.uuid4())
+        return "$('div#%s').text('%s');" % (self.uuid, self.update(pbar))
+
+
+class WidgetHFill(Widget):
+    """The base class for all variable width widgets.
+
+    This widget is much like the \\hfill command in TeX: it will expand to
+    fill the line. You can use more than one in the same line, and they will
+    all have the same width, and together will fill the line.
+    """
+
+    DEFAULT_WIDTH = 50
+
+    @abstractmethod
+    def update(self, pbar, width=DEFAULT_WIDTH):
+        """Updates the widget providing the total width the widget must fill.
+
+        pbar - a reference to the calling ProgressBar
+        width - The total width the widget must fill
+        """
+
+
+class Timer(Widget):
+    """Widget which displays the elapsed seconds."""
+
+    __slots__ = ('format_string',)
+    TIME_SENSITIVE = True
+
+    def __init__(self, format='Elapsed Time: %s'):
+        self.format_string = format
+
+    @staticmethod
+    def format_time(seconds):
+        """Formats time as the string "HH:MM:SS"."""
+
+        return str(datetime.timedelta(seconds=int(seconds)))
+
+
+    def update(self, pbar):
+        """Updates the widget to show the elapsed time."""
+
+        return self.format_string % self.format_time(pbar.seconds_elapsed)
+
+
+class ETA(Timer):
+    """Widget which attempts to estimate the time of arrival."""
+
+    TIME_SENSITIVE = True
+
+    def update(self, pbar):
+        """Updates the widget to show the ETA or total time when finished."""
+
+        if pbar.currval == 0:
+            return 'ETA:  --:--:--'
+        elif pbar.finished:
+            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
+        else:
+            elapsed = pbar.seconds_elapsed
+            eta = elapsed * pbar.maxval / pbar.currval - elapsed
+            return 'ETA:  %s' % self.format_time(eta)
+
+
+class FileTransferSpeed(Widget):
+    """Widget for showing the transfer speed (useful for file transfers)."""
+
+    FORMAT = '%6.2f %s%s/s'
+    PREFIXES = ' kMGTPEZY'
+    __slots__ = ('unit',)
+
+    def __init__(self, unit='B'):
+        self.unit = unit
+
+    def update(self, pbar):
+        """Updates the widget with the current SI prefixed speed."""
+
+        if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: # =~ 0
+            scaled = power = 0
+        else:
+            speed = pbar.currval / pbar.seconds_elapsed
+            power = int(math.log(speed, 1000))
+            scaled = speed / 1000.**power
+
+        return self.FORMAT % (scaled, self.PREFIXES[power], self.unit)
+
+
+class AnimatedMarker(Widget):
+    """An animated marker for the progress bar which defaults to appear as if
+    """An animated marker for the progress bar which, by default, appears
+    to rotate.
+
+    __slots__ = ('markers', 'curmark')
+
+    def __init__(self, markers='|/-\\'):
+        self.markers = markers
+        self.curmark = -1
+
+    def update(self, pbar):
+        """Updates the widget to show the next marker or the first marker when
+        finished"""
+
+        if pbar.finished: return self.markers[0]
+
+        self.curmark = (self.curmark + 1) % len(self.markers)
+        return self.markers[self.curmark]
+
+# Alias for backwards compatibility
+RotatingMarker = AnimatedMarker
+
+
+class Counter(Widget):
+    """Displays the current count."""
+
+    __slots__ = ('format_string',)
+
+    def __init__(self, format='%d'):
+        self.format_string = format
+
+    def update(self, pbar):
+        return self.format_string % pbar.currval
+
+
+class Attribute(Widget):
+    """Displays the values of ProgressBar attributes.
+
+    attr_name - ProgressBar attribute dictionary key or list of keys
+    format_string - Format for the output. Attributes are looked up according
+      to attr_name and then used as a tuple with this format string, i.e.
+      format_string % attr_tuple
+    fallback - If an attribute lookup fails, this string is displayed instead.
+
+    """
+
+    __slots__ = ('attr_name', 'format_string', 'fallback')
+
+    def __init__(self, attr_name, format='%s', fallback='?'):
+        self.attr_name = attr_name
+        self.format_string = format
+        self.fallback = fallback
+
+    def update(self, pbar):
+        try:
+          if isinstance(self.attr_name, basestring) or len(self.attr_name) == 1:
+            # If attr_name is just a string or a single item,
+            # use it as the key as is
+            format_vars = (pbar.attr[self.attr_name],)
+          else:
+            # else, expand it as a tuple of attributes
+            format_vars = tuple([pbar.attr[a] for a in self.attr_name])
+          return self.format_string % format_vars
+        except KeyError:
+          return self.fallback
+
+
+class Percentage(Widget):
+    """Displays the current percentage as a number with a percent sign."""
+
+    def update(self, pbar):
+        return '%3d%%' % pbar.percentage()
+
+
+class FormatLabel(Timer):
+    """Displays a formatted label."""
+
+    mapping = {
+        'elapsed': ('seconds_elapsed', Timer.format_time),
+        'finished': ('finished', None),
+        'last_update': ('last_update_time', None),
+        'max': ('maxval', None),
+        'seconds': ('seconds_elapsed', None),
+        'start': ('start_time', None),
+        'value': ('currval', None)
+    }
+
+    __slots__ = ('format_string',)
+    def __init__(self, format):
+        self.format_string = format
+
+    def update(self, pbar):
+        context = {}
+        for name, (key, transform) in self.mapping.items():
+            try:
+                value = getattr(pbar, key)
+
+                if transform is None:
+                   context[name] = value
+                else:
+                   context[name] = transform(value)
+            except: pass
+
+        return self.format_string % context
+
+
+class SimpleProgress(Widget):
+    """Returns progress as a count of the total (e.g.: "5 of 47")."""
+
+    __slots__ = ('sep',)
+
+    def __init__(self, sep=' of '):
+        self.sep = sep
+
+    def update(self, pbar):
+        return '%d%s%d' % (pbar.currval, self.sep, pbar.maxval)
+
+
+class Bar(WidgetHFill):
+    """A progress bar which stretches to fill the line."""
+
+    __slots__ = ('marker', 'left', 'right', 'fill', 'fill_left')
+
+    def __init__(self, marker='#', left='|', right='|', fill=' ',
+                 fill_left=True):
+        """Creates a customizable progress bar.
+
+        marker - string or updatable object to use as a marker
+        left - string or updatable object to use as a left border
+        right - string or updatable object to use as a right border
+        fill - character to use for the empty part of the progress bar
+        fill_left - whether to fill from the left or the right
+        """
+        self.marker = marker
+        self.left = left
+        self.right = right
+        self.fill = fill
+        self.fill_left = fill_left
+
+    def update(self, pbar, width=WidgetHFill.DEFAULT_WIDTH):
+        """Updates the progress bar and its subcomponents."""
+
+        left, marked, right = (format_updatable(i, pbar) for i in
+                               (self.left, self.marker, self.right))
+
+        width -= len(left) + len(right)
+        # Marked must *always* have length of 1
+        if pbar.maxval:
+          marked *= int(pbar.currval / pbar.maxval * width)
+        else:
+          marked = ''
+
+        if self.fill_left:
+            return '%s%s%s' % (left, marked.ljust(width, self.fill), right)
+        else:
+            return '%s%s%s' % (left, marked.rjust(width, self.fill), right)
+
+
+    def update_html(self, pbar):
+        if self.uuid is None:
+            self.uuid = str(uuid.uuid4())
+        return """
+        <div class="pb_bar" id="%s"></div>
+        <script type="text/javascript">
+            $("div#%s").progressbar({value: 0, max: %d});
+        </script>
+        """ % (self.uuid, self.uuid,pbar.maxval)
+
+
+    def update_js(self, pbar):
+        if self.uuid is None:
+            self.uuid = str(uuid.uuid4())
+        return """
+        var $myPB = $("div#{divid}")
+        if ($myPB.hasClass('ui-progressbar')) {{
+            $myPB.progressbar('value', {pbar.currval:d});
+        }} else {{
+            $myPB.progressbar({{value: 0, max: {pbar.maxval:d}}});
+        }}
+        """.format(divid=self.uuid, pbar=pbar)
+
+
+class ReverseBar(Bar):
+    """A bar which has a marker which bounces from side to side."""
+
+    def __init__(self, marker='#', left='|', right='|', fill=' ',
+                 fill_left=False):
+        """Creates a customizable progress bar.
+
+        marker - string or updatable object to use as a marker
+        left - string or updatable object to use as a left border
+        right - string or updatable object to use as a right border
+        fill - character to use for the empty part of the progress bar
+        fill_left - whether to fill from the left or the right
+        """
+        self.marker = marker
+        self.left = left
+        self.right = right
+        self.fill = fill
+        self.fill_left = fill_left
+
+
+class BouncingBar(Bar):
+    def update(self, pbar, width=WidgetHFill.DEFAULT_WIDTH):
+        """Updates the progress bar and its subcomponents."""
+
+        left, marker, right = (format_updatable(i, pbar) for i in
+                               (self.left, self.marker, self.right))
+
+        width -= len(left) + len(right)
+
+        if pbar.finished: return '%s%s%s' % (left, width * marker, right)
+
+        position = int(pbar.currval % (width * 2 - 1))
+        if position > width: position = width * 2 - position
+        lpad = self.fill * (position - 1)
+        rpad = self.fill * (width - len(marker) - len(lpad))
+
+        # Swap if we want to bounce the other way
+        if not self.fill_left: rpad, lpad = lpad, rpad
+
+        return '%s%s%s%s%s' % (left, lpad, marker, rpad, right)
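
The widgets compose freely; FormatLabel in particular pulls named values
straight off the bar through the mapping table above. A small sketch (the
format strings are placeholders):

    from yt.extern.progressbar import ProgressBar, FormatLabel, Bar, ETA

    widgets = [FormatLabel('item %(value)d of %(max)d'), ' ',
               Bar(marker='='), ' ', ETA()]
    pbar = ProgressBar(maxval=200, widgets=widgets).start()
    for i in range(200):
        pbar.update(i + 1)
    pbar.finish()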

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r 0405d6eff17e48f919309a5c5653ee9c5568dd63 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -337,7 +337,6 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
@@ -345,12 +344,13 @@
         return ExtProgressBar(title, maxval)
     elif ytcfg.getboolean("yt", "__parallel"):
         return ParallelProgressBar(title, maxval)
-    widgets = [ title,
-            pb.Percentage(), ' ',
-            pb.Bar(marker=pb.RotatingMarker()),
-            ' ', pb.ETA(), ' ']
-    pbar = pb.ProgressBar(widgets=widgets,
-                          maxval=maxval).start()
+    else:
+        widgets = [ title,
+                    pb.Percentage(), ' ',
+                    pb.Bar(marker=pb.RotatingMarker()),
+                    ' ', pb.ETA(), ' ']
+        pbar = pb.ProgressBar(widgets=widgets,
+                              maxval=maxval).start()
     return pbar
 
 def only_on_root(func, *args, **kwargs):
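
The net effect of this funcs.py change is that get_pbar no longer bails out
merely because it is running under IPython, so notebook sessions get the new
HTML bar while terminal scripts keep the text one. Call sites are unchanged;
compare the one in photon_models.py above:

    from yt.funcs import get_pbar

    pbar = get_pbar("Generating Photons", 1000)
    for i in range(1000):
        # ... do one unit of work here, then:
        pbar.update(i + 1)
    pbar.finish()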


https://bitbucket.org/yt_analysis/yt/commits/5c030d64ef4d/
Changeset:   5c030d64ef4d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-21 17:19:42
Summary:     Merged in ngoldbaum/yt-3.0 (pull request #137)

Adding a new progressbar that is notebook-aware
Affected #:  6 files

diff -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 -r 5c030d64ef4d74b8d3e5593bfebf57c1c6949f5f yt/extern/progressbar.py
--- a/yt/extern/progressbar.py
+++ /dev/null
@@ -1,368 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-1 -*-
-#
-# progressbar  - Text progressbar library for python.
-# Copyright (c) 2005 Nilton Volpato
-# 
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-# 
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-# 
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-
-"""Text progressbar library for python.
-
-This library provides a text mode progressbar. This is tipically used
-to display the progress of a long running operation, providing a
-visual clue that processing is underway.
-
-The ProgressBar class manages the progress, and the format of the line
-is given by a number of widgets. A widget is an object that may
-display diferently depending on the state of the progress. There are
-three types of widget:
-- a string, which always shows itself;
-- a ProgressBarWidget, which may return a diferent value every time
-it's update method is called; and
-- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
-expands to fill the remaining width of the line.
-
-The progressbar module is very easy to use, yet very powerful. And
-automatically supports features like auto-resizing when available.
-"""
-
-__author__ = "Nilton Volpato"
-__author_email__ = "first-name dot last-name @ gmail.com"
-__date__ = "2006-05-07"
-__version__ = "2.2"
-
-# Changelog
-#
-# 2006-05-07: v2.2 fixed bug in windows
-# 2005-12-04: v2.1 autodetect terminal width, added start method
-# 2005-12-04: v2.0 everything is now a widget (wow!)
-# 2005-12-03: v1.0 rewrite using widgets
-# 2005-06-02: v0.5 rewrite
-# 2004-??-??: v0.1 first version
-
-
-import sys, time
-from array import array
-try:
-    from fcntl import ioctl
-    import termios
-except ImportError:
-    pass
-import signal
-
-class ProgressBarWidget(object):
-    """This is an element of ProgressBar formatting.
-
-    The ProgressBar object will call it's update value when an update
-    is needed. It's size may change between call, but the results will
-    not be good if the size changes drastically and repeatedly.
-    """
-    def update(self, pbar):
-        """Returns the string representing the widget.
-
-        The parameter pbar is a reference to the calling ProgressBar,
-        where one can access attributes of the class for knowing how
-        the update must be made.
-
-        At least this function must be overriden."""
-        pass
-
-class ProgressBarWidgetHFill(object):
-    """This is a variable width element of ProgressBar formatting.
-
-    The ProgressBar object will call it's update value, informing the
-    width this object must the made. This is like TeX \\hfill, it will
-    expand to fill the line. You can use more than one in the same
-    line, and they will all have the same width, and together will
-    fill the line.
-    """
-    def update(self, pbar, width):
-        """Returns the string representing the widget.
-
-        The parameter pbar is a reference to the calling ProgressBar,
-        where one can access attributes of the class for knowing how
-        the update must be made. The parameter width is the total
-        horizontal width the widget must have.
-
-        At least this function must be overriden."""
-        pass
-
-
-class ETA(ProgressBarWidget):
-    "Widget for the Estimated Time of Arrival"
-    def format_time(self, seconds):
-        return time.strftime('%H:%M:%S', time.gmtime(seconds))
-    def update(self, pbar):
-        if pbar.currval == 0:
-            return 'ETA:  --:--:--'
-        elif pbar.finished:
-            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
-        else:
-            elapsed = pbar.seconds_elapsed
-            eta = elapsed * pbar.maxval / pbar.currval - elapsed
-            return 'ETA:  %s' % self.format_time(eta)
-
-class FileTransferSpeed(ProgressBarWidget):
-    "Widget for showing the transfer speed (useful for file transfers)."
-    def __init__(self):
-        self.fmt = '%6.2f %s'
-        self.units = ['B','K','M','G','T','P']
-    def update(self, pbar):
-        if pbar.seconds_elapsed < 2e-6:#== 0:
-            bps = 0.0
-        else:
-            bps = float(pbar.currval) / pbar.seconds_elapsed
-        spd = bps
-        for u in self.units:
-            if spd < 1000:
-                break
-            spd /= 1000
-        return self.fmt % (spd, u+'/s')
-
-class RotatingMarker(ProgressBarWidget):
-    "A rotating marker for filling the bar of progress."
-    def __init__(self, markers='|/-\\'):
-        self.markers = markers
-        self.curmark = -1
-    def update(self, pbar):
-        if pbar.finished:
-            return self.markers[0]
-        self.curmark = (self.curmark + 1)%len(self.markers)
-        return self.markers[self.curmark]
-
-class Percentage(ProgressBarWidget):
-    "Just the percentage done."
-    def update(self, pbar):
-        return '%3d%%' % pbar.percentage()
-
-class Bar(ProgressBarWidgetHFill):
-    "The bar of progress. It will strech to fill the line."
-    def __init__(self, marker='#', left='|', right='|'):
-        self.marker = marker
-        self.left = left
-        self.right = right
-    def _format_marker(self, pbar):
-        if isinstance(self.marker, (str, unicode)):
-            return self.marker
-        else:
-            return self.marker.update(pbar)
-    def update(self, pbar, width):
-        percent = pbar.percentage()
-        cwidth = width - len(self.left) - len(self.right)
-        marked_width = int(percent * cwidth / 100)
-        m = self._format_marker(pbar)
-        bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
-        return bar
-
-class ReverseBar(Bar):
-    "The reverse bar of progress, or bar of regress. :)"
-    def update(self, pbar, width):
-        percent = pbar.percentage()
-        cwidth = width - len(self.left) - len(self.right)
-        marked_width = int(percent * cwidth / 100)
-        m = self._format_marker(pbar)
-        bar = (self.left + (m*marked_width).rjust(cwidth) + self.right)
-        return bar
-
-default_widgets = [Percentage(), ' ', Bar()]
-class ProgressBar(object):
-    """This is the ProgressBar class, it updates and prints the bar.
-
-    The term_width parameter may be an integer, or None, in which case
-    the bar will try to guess the width; if that fails it defaults to 80
-    columns.
-
-    The simple use is like this:
-    >>> pbar = ProgressBar().start()
-    >>> for i in xrange(100):
-    ...    # do something
-    ...    pbar.update(i+1)
-    ...
-    >>> pbar.finish()
-
-    But anything you want to do is possible (well, almost anything).
-    You can supply different widgets of any type in any order. And you
-    can even write your own widgets! There are many widgets already
-    shipped and you should experiment with them.
-
-    When implementing a widget update method you may access any
-    attribute or function of the ProgressBar object calling the
-    widget's update method. The most important attributes you would
-    like to access are:
-    - currval: current value of the progress, 0 <= currval <= maxval
-    - maxval: maximum (and final) value of the progress
-    - finished: True if the bar has finished (reached 100%), False otherwise
-    - start_time: first time update() method of ProgressBar was called
-    - seconds_elapsed: seconds elapsed since start_time
-    - percentage(): percentage of the progress (this is a method)
-    """
-    def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
-                 fd=sys.stderr):
-        assert maxval > 0
-        self.maxval = maxval
-        self.widgets = widgets
-        self.fd = fd
-        self.signal_set = False
-        if term_width is None:
-            try:
-                self.handle_resize(None,None)
-                signal.signal(signal.SIGWINCH, self.handle_resize)
-                self.signal_set = True
-            except:
-                self.term_width = 79
-        else:
-            self.term_width = term_width
-
-        self.currval = 0
-        self.finished = False
-        self.prev_percentage = -1
-        self.start_time = None
-        self.seconds_elapsed = 0
-
-    def handle_resize(self, signum, frame):
-        h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
-        self.term_width = w
-
-    def percentage(self):
-        "Returns the percentage of the progress."
-        return self.currval*100.0 / self.maxval
-
-    def _format_widgets(self):
-        r = []
-        hfill_inds = []
-        num_hfill = 0
-        currwidth = 0
-        for i, w in enumerate(self.widgets):
-            if isinstance(w, ProgressBarWidgetHFill):
-                r.append(w)
-                hfill_inds.append(i)
-                num_hfill += 1
-            elif isinstance(w, (str, unicode)):
-                r.append(w)
-                currwidth += len(w)
-            else:
-                weval = w.update(self)
-                currwidth += len(weval)
-                r.append(weval)
-        for iw in hfill_inds:
-            r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
-        return r
-
-    def _format_line(self):
-        return ''.join(self._format_widgets()).ljust(self.term_width)
-
-    def _need_update(self):
-        return int(self.percentage()) != int(self.prev_percentage)
-
-    def update(self, value):
-        "Updates the progress bar to a new value."
-        assert 0 <= value <= self.maxval
-        self.currval = value
-        if not self._need_update() or self.finished:
-            return
-        if not self.start_time:
-            self.start_time = time.time()
-        self.seconds_elapsed = time.time() - self.start_time
-        self.prev_percentage = self.percentage()
-        if value != self.maxval:
-            self.fd.write(self._format_line() + '\r')
-        else:
-            self.finished = True
-            self.fd.write(self._format_line() + '\n')
-
-    def start(self):
-        """Start measuring time, and prints the bar at 0%.
-
-        It returns self so you can use it like this:
-        >>> pbar = ProgressBar().start()
-        >>> for i in xrange(100):
-        ...    # do something
-        ...    pbar.update(i+1)
-        ...
-        >>> pbar.finish()
-        """
-        self.update(0)
-        return self
-
-    def finish(self):
-        """Used to tell the progress is finished."""
-        self.update(self.maxval)
-        if self.signal_set:
-            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
-        
-
-
-
-
-
-if __name__=='__main__':
-    import os
-
-    def example1():
-        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
-                   ' ', ETA(), ' ', FileTransferSpeed()]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
-        for i in range(1000000):
-            # do something
-            pbar.update(10*i+1)
-        pbar.finish()
-        print
-
-    def example2():
-        class CrazyFileTransferSpeed(FileTransferSpeed):
-            "It's bigger between 45 and 80 percent"
-            def update(self, pbar):
-                if 45 < pbar.percentage() < 80:
-                    return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
-                else:
-                    return FileTransferSpeed.update(self,pbar)
-
-        widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000)
-        # maybe do something
-        pbar.start()
-        for i in range(2000000):
-            # do something
-            pbar.update(5*i+1)
-        pbar.finish()
-        print
-
-    def example3():
-        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
-        for i in range(1000000):
-            # do something
-            pbar.update(10*i+1)
-        pbar.finish()
-        print
-
-    def example4():
-        widgets = ['Test: ', Percentage(), ' ',
-                   Bar(marker='0',left='[',right=']'),
-                   ' ', ETA(), ' ', FileTransferSpeed()]
-        pbar = ProgressBar(widgets=widgets, maxval=500)
-        pbar.start()
-        for i in range(100,500+1,50):
-            time.sleep(0.2)
-            pbar.update(i)
-        pbar.finish()
-        print
-
-
-    example1()
-    example2()
-    example3()
-    example4()
-

diff -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 -r 5c030d64ef4d74b8d3e5593bfebf57c1c6949f5f yt/extern/progressbar/__init__.py
--- /dev/null
+++ b/yt/extern/progressbar/__init__.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# progressbar  - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""Text progress bar library for Python.
+
+A text progress bar is typically used to display the progress of a long
+running operation, providing a visual cue that processing is underway.
+
+The ProgressBar class manages the current progress, and the format of the line
+is given by a number of widgets. A widget is an object that may display
+differently depending on the state of the progress bar. There are three types
+of widgets:
+ - a string, which always shows itself
+
+ - a ProgressBarWidget, which may return a different value every time its
+   update method is called
+
+ - a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
+   expands to fill the remaining width of the line.
+
+The progressbar module is very easy to use, yet very powerful. It will also
+automatically enable features like auto-resizing when the system supports it.
+"""
+
+__author__ = 'Nilton Volpato'
+__author_email__ = 'first-name dot last-name @ gmail.com'
+__date__ = '2011-05-14'
+__version__ = '2.3'
+
+from compat import *
+from widgets import *
+from progressbar import *
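
A rough usage sketch, not part of the changeset, combining the three widget
types listed in the docstring above: a plain string, an updatable widget
(Percentage), and a fill widget (Bar). It assumes the vendored package is
importable as yt.extern.progressbar:

    from yt.extern.progressbar import ProgressBar, Percentage, Bar

    # 'Loading: ' always shows itself; Percentage() changes on each update;
    # Bar() expands to fill the remaining width of the line.
    pbar = ProgressBar(widgets=['Loading: ', Percentage(), ' ', Bar()],
                       maxval=1000).start()
    for i in range(1000):
        pbar.update(i + 1)
    pbar.finish()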

diff -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 -r 5c030d64ef4d74b8d3e5593bfebf57c1c6949f5f yt/extern/progressbar/compat.py
--- /dev/null
+++ b/yt/extern/progressbar/compat.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# progressbar  - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""Compatibility methods and classes for the progressbar module."""
+
+
+# Python 3.x (and backports) use a modified iterator syntax
+# This will allow 2.x to behave with 3.x iterators
+try:
+    next
+except NameError:
+    def next(iter):
+        try:
+            # Try new style iterators
+            return iter.__next__()
+        except AttributeError:
+            # Fallback in case of a "native" iterator
+            return iter.next()
+
+
+# Python < 2.5 does not have "any"
+try:
+    any
+except NameError:
+    def any(iterator):
+        for item in iterator:
+            if item: return True
+        return False
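
An illustrative check, not from the changeset, that the shims above behave
as intended once this module is imported:

    it = iter([1, 2, 3])
    assert next(it) == 1           # handles both __next__()- and next()-style iterators
    assert any(x > 2 for x in it)  # 3 is still pending, so this is True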

diff -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 -r 5c030d64ef4d74b8d3e5593bfebf57c1c6949f5f yt/extern/progressbar/progressbar.py
--- /dev/null
+++ b/yt/extern/progressbar/progressbar.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# progressbar  - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""Main ProgressBar class."""
+
+from __future__ import division
+
+import math
+import os
+import signal
+import sys
+import time
+import uuid
+
+try:
+    from fcntl import ioctl
+    from array import array
+    import termios
+except ImportError:
+    pass
+
+import widgets
+
+# Test to see if we are in an IPython session.
+ipython = None
+for key in ['KernelApp', 'IPKernelApp']:
+    try:
+        ipython = get_ipython().config[key]['parent_appname']
+    except (NameError, KeyError):
+        pass
+
+ipython_notebook_css = """
+td.pb_widget {
+    width: auto;
+}
+td.pb_widget_fill {
+    width: 100%;
+}
+table.pb {
+    font-family: monospace;
+    border: 0;
+    margin: 0;
+}
+table.pb tr { border: 0; }
+table.pb td {
+    white-space: nowrap;
+    border: 0;
+}
+div.pb {
+    border: 1px solid #ddd;
+    border-radius: 3px;
+}
+div.pb_bar {
+    height: 1.5em;
+}
+""".replace('\n', ' ')
+
+class UnknownLength: pass
+
+
+class ProgressBar(object):
+    """The ProgressBar class which updates and prints the bar.
+
+    A common way of using it is like:
+    >>> pbar = ProgressBar().start()
+    >>> for i in range(100):
+    ...    # do something
+    ...    pbar.update(i+1)
+    ...
+    >>> pbar.finish()
+
+    You can also use a ProgressBar as an iterator:
+    >>> progress = ProgressBar()
+    >>> for i in progress(some_iterable):
+    ...    # do something
+    ...
+
+    Since the progress bar is incredibly customizable you can specify
+    different widgets of any type in any order. You can even write your own
+    widgets! However, since there are already a good number of widgets you
+    should probably play around with them before moving on to create your own
+    widgets.
+
+    The term_width parameter represents the current terminal width. If the
+    parameter is set to an integer then the progress bar will use that,
+    otherwise it will attempt to determine the terminal width falling back to
+    80 columns if the width cannot be determined.
+
+    When implementing a widget's update method you are passed a reference to
+    the current progress bar. As a result, you have access to the
+    ProgressBar's methods and attributes. Although there is nothing preventing
+    you from changing the ProgressBar you should treat it as read only.
+
+    Useful methods and attributes include (Public API):
+     - currval: current progress (0 <= currval <= maxval)
+     - maxval: maximum (and final) value
+     - finished: True if the bar has finished (reached 100%)
+     - start_time: the time when start() method of ProgressBar was called
+     - seconds_elapsed: seconds elapsed since start_time and last call to
+                        update
+     - percentage(): progress in percent [0..100]
+    """
+
+    __slots__ = ('currval', 'fd', 'finished', 'last_update_time',
+                 'left_justify', 'maxval', 'next_update', 'num_intervals',
+                 'poll', 'seconds_elapsed', 'signal_set', 'start_time',
+                 'term_width', 'update_interval', 'widgets', '_time_sensitive',
+                 '__iterable', 'attr', 'html_written', 'uuid')
+
+    _DEFAULT_MAXVAL = 100
+    _DEFAULT_TERMSIZE = 80
+    _DEFAULT_WIDGETS = [widgets.Percentage, widgets.Bar]
+
+    def __init__(self, maxval=None, widgets=None, term_width=None, poll=1,
+                 left_justify=True, fd=sys.stdout, attr={}):
+        """Initializes a progress bar with sane defaults."""
+
+        # Don't share a reference with any other progress bars
+        if widgets is None:
+            widgets = [widget() for widget in self._DEFAULT_WIDGETS]
+
+        self.maxval = maxval
+        self.widgets = widgets
+        self.fd = fd
+        self.left_justify = left_justify
+
+        self.signal_set = False
+        if term_width is not None:
+            self.term_width = term_width
+        else:
+            try:
+                self._handle_resize()
+                signal.signal(signal.SIGWINCH, self._handle_resize)
+                self.signal_set = True
+            except (SystemExit, KeyboardInterrupt): raise
+            except:
+                self.term_width = self._env_size()
+
+        self.__iterable = None
+        self._update_widgets()
+        self.currval = 0
+        self.finished = False
+        self.last_update_time = None
+        self.poll = poll
+        self.seconds_elapsed = 0
+        self.start_time = None
+        self.update_interval = 1
+        self.attr = attr
+
+        # Set flag so we only write out the HTML once,
+        # then update with javascript
+        self.html_written = False
+
+        self.uuid = str(uuid.uuid4())
+
+        # Install our CSS if we are in an IPython notebook
+        if ipython == 'ipython-notebook':
+            from IPython.display import Javascript, display
+            display(Javascript('//%s\n$("head").append("<style>%s</style>")' %
+                               (self.uuid,ipython_notebook_css)))
+            
+            # Also add a function that removes progressbar output from the cells
+            js = '''
+                  // %s -- used to remove this code blob in the end
+                  IPython.OutputArea.prototype.cleanProgressBar = function(uuids) {
+                      // filter by uuid-strings 
+                      var myfilter = function(output) { 
+                          var nuids = uuids.length;
+                          for (var i=0; i<nuids; i++) {
+                              if (output.hasOwnProperty('html')) {
+                                  if (output.html.indexOf(uuids[i]) != -1) {
+                                      return false;
+                                  }
+                              }
+                              if (output.hasOwnProperty('javascript')) {
+                                  if (output.javascript.indexOf(uuids[i]) != -1) {
+                                      return false;
+                                  }
+                              }
+                          }
+                          // keep all others
+                          return true;
+                      };
+
+                      // Filter the outputs
+                      this.outputs = this.outputs.filter(myfilter);
+                };
+                ''' % self.uuid
+            display(Javascript(js))
+
+    def __call__(self, iterable):
+        """Use a ProgressBar to iterate through an iterable."""
+
+        try:
+            self.maxval = len(iterable)
+        except:
+            if self.maxval is None:
+                self.maxval = UnknownLength
+
+        self.__iterable = iter(iterable)
+        return self
+
+
+    def __iter__(self):
+        return self
+
+
+    def __next__(self):
+        try:
+            value = next(self.__iterable)
+            if self.start_time is None: self.start()
+            else: self.update(self.currval + 1)
+            return value
+        except StopIteration:
+            self.finish()
+            raise
+
+
+    # Create an alias so that Python 2.x won't complain about not being
+    # an iterator.
+    next = __next__
+
+
+    def _env_size(self):
+        """Tries to find the term_width from the environment."""
+
+        return int(os.environ.get('COLUMNS', self._DEFAULT_TERMSIZE)) - 1
+
+
+    def _handle_resize(self, signum=None, frame=None):
+        """Tries to catch resize signals sent from the terminal."""
+
+        h, w = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\0' * 8))[:2]
+        self.term_width = w
+
+
+    def percentage(self):
+        """Returns the progress as a percentage."""
+        return self.currval * 100.0 / self.maxval
+
+    percent = property(percentage)
+
+
+    def _format_widgets(self):
+        result = []
+        expanding = []
+        width = self.term_width
+
+        for index, widget in enumerate(self.widgets):
+            if isinstance(widget, widgets.WidgetHFill):
+                result.append(widget)
+                expanding.insert(0, index)
+            else:
+                widget = widgets.format_updatable(widget, self)
+                result.append(widget)
+                width -= len(widget)
+
+        count = len(expanding)
+        while count:
+            portion = max(int(math.ceil(width * 1. / count)), 0)
+            index = expanding.pop()
+            count -= 1
+
+            widget = result[index].update(self, portion)
+            width -= len(widget)
+            result[index] = widget
+
+        return result
+
+
+    def _format_line(self):
+        """Joins the widgets and justifies the line."""
+
+        widgets = ''.join(self._format_widgets())
+
+        if self.left_justify: return widgets.ljust(self.term_width)
+        else: return widgets.rjust(self.term_width)
+
+
+    def _format_html(self):
+      html = '<div class="pb" id="%s"><table class="pb ui-widget"><tr>\n' % self.uuid
+      for widget in self.widgets:
+        if isinstance(widget, widgets.WidgetHFill):
+          td_class = 'pb_widget_fill'
+        else:
+          td_class = 'pb_widget'
+
+        html += ('<td class="%s">' % td_class) + \
+                widgets.format_updatable_html(widget, self) + \
+                '</td>\n'
+      html += '</tr></table><div>'
+      return html
+
+
+    def _need_update(self):
+        """Returns whether the ProgressBar should redraw the line."""
+        if self.currval >= self.next_update or self.finished: return True
+
+        delta = time.time() - self.last_update_time
+        return self._time_sensitive and delta > self.poll
+
+
+    def _update_widgets(self):
+        """Checks all widgets for the time sensitive bit."""
+
+        self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
+                                    for w in self.widgets)
+
+
+    def update(self, value=None, attr={}):
+        """Updates the ProgressBar to a new value."""
+
+        if value is not None and value is not UnknownLength:
+            if (self.maxval is not UnknownLength
+                and not 0 <= value <= self.maxval):
+
+                raise ValueError('Value out of range')
+
+            self.currval = value
+
+        self.attr.update(attr)
+
+        if not self._need_update(): return
+        if self.start_time is None:
+            raise RuntimeError('You must call "start" before calling "update"')
+
+        now = time.time()
+        self.seconds_elapsed = now - self.start_time
+        self.next_update = self.currval + self.update_interval
+
+        if ipython == 'ipython-notebook':
+            if not self.html_written:
+                # We have yet to display the HTML, do that first
+                from IPython.display import HTML, display
+                display(HTML(self._format_html()))
+                self.html_written = True
+            else:
+                # The HTML has been written once, now update with JS
+                from IPython.display import Javascript, display
+                for widget in self.widgets:
+                    js = widgets.updatable_js(widget, self)
+                    if js:
+                        display(Javascript(js))
+        else:
+            self.fd.write('\r' + self._format_line())
+            self.fd.flush()
+
+        self.last_update_time = now
+
+
+    def start(self):
+        """Starts measuring time, and prints the bar at 0%.
+
+        It returns self so you can use it like this:
+        >>> pbar = ProgressBar().start()
+        >>> for i in range(100):
+        ...    # do something
+        ...    pbar.update(i+1)
+        ...
+        >>> pbar.finish()
+        """
+
+        if self.maxval is None:
+            self.maxval = self._DEFAULT_MAXVAL
+
+        self.num_intervals = max(100, self.term_width)
+        self.next_update = 0
+
+        if self.maxval is not UnknownLength:
+            if self.maxval < 0: raise ValueError('Value out of range')
+            self.update_interval = self.maxval / self.num_intervals
+
+
+        self.start_time = self.last_update_time = time.time()
+        self.html_written = False
+        self.finished = False
+        self.update(0)
+
+        return self
+
+
+    def finish(self):
+        """Puts the ProgressBar bar in the finished state."""
+
+        self.finished = True
+        self.update(self.maxval)
+        self.start_time = None
+
+        # Clean up notebook output, which works quite differently from a terminal
+        if not ipython == 'ipython-notebook':
+            self.fd.write('\n')
+        else:
+            from IPython.display import Javascript, display
+            # First delete the node that held the progress bar from the page
+            js = """var element = document.getElementById('%s');
+                    element.parentNode.removeChild(element);""" % self.uuid
+            display(Javascript(js))
+
+            # Then also remove its trace from the cell output (so it doesn't get
+            # stored with the notebook). This needs to be done for all widgets as
+            # well as for the ProgressBar itself.
+            uuids = [str(self.uuid)]
+            uuids += [w.uuid for w in self.widgets if isinstance(w, widgets.Widget)]
+            display(Javascript('this.cleanProgressBar(%s)' % uuids))
+
+        if self.signal_set:
+            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
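
A brief sketch, not part of the changeset, of the iterator-style API added
in this file; wrapping an iterable takes maxval from len() and advances the
bar once per item:

    from yt.extern.progressbar import ProgressBar

    for item in ProgressBar()(range(200)):
        pass  # per-item work goes here; the bar updates itself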

diff -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 -r 5c030d64ef4d74b8d3e5593bfebf57c1c6949f5f yt/extern/progressbar/widgets.py
--- /dev/null
+++ b/yt/extern/progressbar/widgets.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# progressbar  - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""Default ProgressBar widgets."""
+
+from __future__ import division
+
+import datetime
+import math
+import uuid
+
+try:
+    from abc import ABCMeta, abstractmethod
+except ImportError:
+    AbstractWidget = object
+    abstractmethod = lambda fn: fn
+else:
+    AbstractWidget = ABCMeta('AbstractWidget', (object,), {})
+
+
+def format_updatable(updatable, pbar):
+    if hasattr(updatable, 'update'): return updatable.update(pbar)
+    else: return updatable
+
+def format_updatable_html(updatable, pbar):
+    if hasattr(updatable, 'update_html'): return updatable.update_html(pbar)
+    else: return updatable
+
+def updatable_js(updatable, pbar):
+    if hasattr(updatable, 'update_js'): return updatable.update_js(pbar)
+    else: return None
+
+
+class Widget(AbstractWidget):
+    """The base class for all widgets.
+
+    The ProgressBar will call the widget's update method when the widget should
+    be updated. The widget's size may change between calls, but the widget may
+    display incorrectly if the size changes drastically and repeatedly.
+
+    The boolean TIME_SENSITIVE informs the ProgressBar that it should be
+    updated more often because it is time sensitive.
+    """
+
+    TIME_SENSITIVE = False
+    __slots__ = ()
+    uuid = None
+
+    @abstractmethod
+    def update(self, pbar):
+        """Updates the widget.
+
+        pbar - a reference to the calling ProgressBar
+        """
+
+    def update_html(self, pbar):
+        if self.uuid is None:
+            self.uuid = str(uuid.uuid4())
+        return '<div id="%s">%s</div>' % (self.uuid, self.update(pbar))
+
+    def update_js(self, pbar):
+        if self.uuid is None:
+            self.uuid = str(uuid.uuid4())
+        return "$('div#%s').text('%s');" % (self.uuid, self.update(pbar))
+
+
+class WidgetHFill(Widget):
+    """The base class for all variable width widgets.
+
+    This widget is much like the \\hfill command in TeX, it will expand to
+    fill the line. You can use more than one in the same line, and they will
+    all have the same width, and together will fill the line.
+    """
+
+    DEFAULT_WIDTH = 50
+
+    @abstractmethod
+    def update(self, pbar, width=DEFAULT_WIDTH):
+        """Updates the widget providing the total width the widget must fill.
+
+        pbar - a reference to the calling ProgressBar
+        width - The total width the widget must fill
+        """
+
+
+class Timer(Widget):
+    """Widget which displays the elapsed seconds."""
+
+    __slots__ = ('format_string',)
+    TIME_SENSITIVE = True
+
+    def __init__(self, format='Elapsed Time: %s'):
+        self.format_string = format
+
+    @staticmethod
+    def format_time(seconds):
+        """Formats time as the string "HH:MM:SS"."""
+
+        return str(datetime.timedelta(seconds=int(seconds)))
+
+
+    def update(self, pbar):
+        """Updates the widget to show the elapsed time."""
+
+        return self.format_string % self.format_time(pbar.seconds_elapsed)
+
+
+class ETA(Timer):
+    """Widget which attempts to estimate the time of arrival."""
+
+    TIME_SENSITIVE = True
+
+    def update(self, pbar):
+        """Updates the widget to show the ETA or total time when finished."""
+
+        if pbar.currval == 0:
+            return 'ETA:  --:--:--'
+        elif pbar.finished:
+            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
+        else:
+            elapsed = pbar.seconds_elapsed
+            eta = elapsed * pbar.maxval / pbar.currval - elapsed
+            return 'ETA:  %s' % self.format_time(eta)
+
+
+class FileTransferSpeed(Widget):
+    """Widget for showing the transfer speed (useful for file transfers)."""
+
+    FORMAT = '%6.2f %s%s/s'
+    PREFIXES = ' kMGTPEZY'
+    __slots__ = ('unit',)
+
+    def __init__(self, unit='B'):
+        self.unit = unit
+
+    def update(self, pbar):
+        """Updates the widget with the current SI prefixed speed."""
+
+        if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: # =~ 0
+            scaled = power = 0
+        else:
+            speed = pbar.currval / pbar.seconds_elapsed
+            power = int(math.log(speed, 1000))
+            scaled = speed / 1000.**power
+
+        return self.FORMAT % (scaled, self.PREFIXES[power], self.unit)
+
+
+class AnimatedMarker(Widget):
+    """An animated marker for the progress bar which defaults to appear as if
+    it were rotating.
+    """
+
+    __slots__ = ('markers', 'curmark')
+
+    def __init__(self, markers='|/-\\'):
+        self.markers = markers
+        self.curmark = -1
+
+    def update(self, pbar):
+        """Updates the widget to show the next marker or the first marker when
+        finished"""
+
+        if pbar.finished: return self.markers[0]
+
+        self.curmark = (self.curmark + 1) % len(self.markers)
+        return self.markers[self.curmark]
+
+# Alias for backwards compatibility
+RotatingMarker = AnimatedMarker
+
+
+class Counter(Widget):
+    """Displays the current count."""
+
+    __slots__ = ('format_string',)
+
+    def __init__(self, format='%d'):
+        self.format_string = format
+
+    def update(self, pbar):
+        return self.format_string % pbar.currval
+
+
+class Attribute(Widget):
+    """Displays the values of ProgressBar attributes.
+
+    attr_name - ProgressBar attribute dictionary key or list of keys
+    format_string - Format for the output. Attributes are looked up according
+      to attr_name and then used as a tuple with this format string, i.e.
+      format_string % attr_tuple
+    fallback - If an attribute lookup fails, this string is displayed instead.
+
+    """
+
+    __slots__ = ('attr_name', 'format_string', 'fallback')
+
+    def __init__(self, attr_name, format='%s', fallback='?'):
+        self.attr_name = attr_name
+        self.format_string = format
+        self.fallback = fallback
+
+    def update(self, pbar):
+        try:
+            if isinstance(self.attr_name, basestring) or len(self.attr_name) == 1:
+                # If attr_name is just a string or a single item,
+                # use it as the key as-is
+                format_vars = (pbar.attr[self.attr_name],)
+            else:
+                # Otherwise, expand it as a tuple of attributes
+                format_vars = tuple([pbar.attr[a] for a in self.attr_name])
+            return self.format_string % format_vars
+        except KeyError:
+            return self.fallback
+
+
+class Percentage(Widget):
+    """Displays the current percentage as a number with a percent sign."""
+
+    def update(self, pbar):
+        return '%3d%%' % pbar.percentage()
+
+
+class FormatLabel(Timer):
+    """Displays a formatted label."""
+
+    mapping = {
+        'elapsed': ('seconds_elapsed', Timer.format_time),
+        'finished': ('finished', None),
+        'last_update': ('last_update_time', None),
+        'max': ('maxval', None),
+        'seconds': ('seconds_elapsed', None),
+        'start': ('start_time', None),
+        'value': ('currval', None)
+    }
+
+    __slots__ = ('format_string',)
+    def __init__(self, format):
+        self.format_string = format
+
+    def update(self, pbar):
+        context = {}
+        for name, (key, transform) in self.mapping.items():
+            try:
+                value = getattr(pbar, key)
+
+                if transform is None:
+                   context[name] = value
+                else:
+                   context[name] = transform(value)
+            except: pass
+
+        return self.format_string % context
+
+
+class SimpleProgress(Widget):
+    """Returns progress as a count of the total (e.g.: "5 of 47")."""
+
+    __slots__ = ('sep',)
+
+    def __init__(self, sep=' of '):
+        self.sep = sep
+
+    def update(self, pbar):
+        return '%d%s%d' % (pbar.currval, self.sep, pbar.maxval)
+
+
+class Bar(WidgetHFill):
+    """A progress bar which stretches to fill the line."""
+
+    __slots__ = ('marker', 'left', 'right', 'fill', 'fill_left')
+
+    def __init__(self, marker='#', left='|', right='|', fill=' ',
+                 fill_left=True):
+        """Creates a customizable progress bar.
+
+        marker - string or updatable object to use as a marker
+        left - string or updatable object to use as a left border
+        right - string or updatable object to use as a right border
+        fill - character to use for the empty part of the progress bar
+        fill_left - whether to fill from the left or the right
+        """
+        self.marker = marker
+        self.left = left
+        self.right = right
+        self.fill = fill
+        self.fill_left = fill_left
+
+    def update(self, pbar, width=WidgetHFill.DEFAULT_WIDTH):
+        """Updates the progress bar and its subcomponents."""
+
+        left, marked, right = (format_updatable(i, pbar) for i in
+                               (self.left, self.marker, self.right))
+
+        width -= len(left) + len(right)
+        # The marker string must *always* have a length of 1
+        if pbar.maxval:
+            marked *= int(pbar.currval / pbar.maxval * width)
+        else:
+            marked = ''
+
+        if self.fill_left:
+            return '%s%s%s' % (left, marked.ljust(width, self.fill), right)
+        else:
+            return '%s%s%s' % (left, marked.rjust(width, self.fill), right)
+
+
+    def update_html(self, pbar):
+        if self.uuid is None:
+            self.uuid = str(uuid.uuid4())
+        return """
+        <div class="pb_bar" id="%s"></div>
+        <script type="text/javascript">
+            $("div#%s").progressbar({value: 0, max: %d});
+        </script>
+        """ % (self.uuid, self.uuid,pbar.maxval)
+
+
+    def update_js(self, pbar):
+        if self.uuid is None:
+            self.uuid = str(uuid.uuid4())
+        return """
+        var $myPB = $("div#{divid}")
+        if ($myPB.hasClass('ui-progressbar')) {{
+            $myPB.progressbar('value', {pbar.currval:d});
+        }} else {{
+            $myPB.progressbar({{value: 0, max: {pbar.maxval:d}}});
+        }}
+        """.format(divid=self.uuid, pbar=pbar)
+
+
+class ReverseBar(Bar):
+    """A bar which has a marker which bounces from side to side."""
+
+    def __init__(self, marker='#', left='|', right='|', fill=' ',
+                 fill_left=False):
+        """Creates a customizable progress bar.
+
+        marker - string or updatable object to use as a marker
+        left - string or updatable object to use as a left border
+        right - string or updatable object to use as a right border
+        fill - character to use for the empty part of the progress bar
+        fill_left - whether to fill from the left or the right
+        """
+        self.marker = marker
+        self.left = left
+        self.right = right
+        self.fill = fill
+        self.fill_left = fill_left
+
+
+class BouncingBar(Bar):
+    def update(self, pbar, width=WidgetHFill.DEFAULT_WIDTH):
+        """Updates the progress bar and its subcomponents."""
+
+        left, marker, right = (format_updatable(i, pbar) for i in
+                               (self.left, self.marker, self.right))
+
+        width -= len(left) + len(right)
+
+        if pbar.finished: return '%s%s%s' % (left, width * marker, right)
+
+        position = int(pbar.currval % (width * 2 - 1))
+        if position > width: position = width * 2 - position
+        lpad = self.fill * (position - 1)
+        rpad = self.fill * (width - len(marker) - len(lpad))
+
+        # Swap if we want to bounce the other way
+        if not self.fill_left: rpad, lpad = lpad, rpad
+
+        return '%s%s%s%s%s' % (left, lpad, marker, rpad, right)
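
As a sketch of the new Attribute widget, with names assumed to be
re-exported by the package __init__: values passed through update()'s attr
dict are formatted into the line, and the fallback ('?') is shown until the
key exists:

    from yt.extern.progressbar import ProgressBar, Attribute, SimpleProgress, Bar

    widgets = [Attribute('level', 'level %d', '?'), ' ', SimpleProgress(), ' ', Bar()]
    pbar = ProgressBar(widgets=widgets, maxval=50).start()
    for i in range(50):
        pbar.update(i + 1, attr={'level': i // 10})
    pbar.finish()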

diff -r eea2615f7e8a2d3639c2cffcd1c9ec73f62b17a4 -r 5c030d64ef4d74b8d3e5593bfebf57c1c6949f5f yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -337,7 +337,6 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
@@ -345,12 +344,13 @@
         return ExtProgressBar(title, maxval)
     elif ytcfg.getboolean("yt", "__parallel"):
         return ParallelProgressBar(title, maxval)
-    widgets = [ title,
-            pb.Percentage(), ' ',
-            pb.Bar(marker=pb.RotatingMarker()),
-            ' ', pb.ETA(), ' ']
-    pbar = pb.ProgressBar(widgets=widgets,
-                          maxval=maxval).start()
+    else:
+        widgets = [ title,
+                    pb.Percentage(), ' ',
+                    pb.Bar(marker=pb.RotatingMarker()),
+                    ' ', pb.ETA(), ' ']
+        pbar = pb.ProgressBar(widgets=widgets,
+                              maxval=maxval).start()
     return pbar
 
 def only_on_root(func, *args, **kwargs):
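
For context, a typical call site for the helper patched above looks roughly
like this (a sketch only; get_pbar lives in yt/funcs.py):

    from yt.funcs import get_pbar

    pbar = get_pbar("Reading grids", 100)
    for i in range(100):
        pbar.update(i + 1)
    pbar.finish()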


https://bitbucket.org/yt_analysis/yt/commits/306864427705/
Changeset:   306864427705
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-11-21 19:17:13
Summary:     Adding an __init__.py file to the ramses frontend, which was missing.
Affected #:  1 file



https://bitbucket.org/yt_analysis/yt/commits/d14dd39a8b5b/
Changeset:   d14dd39a8b5b
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-11-21 19:29:47
Summary:     Re-adding frontend files that should not have been removed.
Affected #:  4 files

diff -r 3068644277057cee18a6309c0e3eaddbba419625 -r d14dd39a8b5b909f6db9328a5851ef2dd0fcbb21 yt/frontends/art/__init__.py
--- /dev/null
+++ b/yt/frontends/art/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.art
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 3068644277057cee18a6309c0e3eaddbba419625 -r d14dd39a8b5b909f6db9328a5851ef2dd0fcbb21 yt/frontends/art/setup.py
--- /dev/null
+++ b/yt/frontends/art/setup.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+import setuptools
+import os, sys, os.path
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('art',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 3068644277057cee18a6309c0e3eaddbba419625 -r d14dd39a8b5b909f6db9328a5851ef2dd0fcbb21 yt/frontends/ramses/__init__.py
--- a/yt/frontends/ramses/__init__.py
+++ b/yt/frontends/ramses/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.ramses
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 3068644277057cee18a6309c0e3eaddbba419625 -r d14dd39a8b5b909f6db9328a5851ef2dd0fcbb21 yt/frontends/ramses/setup.py
--- /dev/null
+++ b/yt/frontends/ramses/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+import glob
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('ramses', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


https://bitbucket.org/yt_analysis/yt/commits/e04142fa31be/
Changeset:   e04142fa31be
Branch:      yt-3.0
User:        ChrisMalone
Date:        2013-11-21 20:53:23
Summary:     this at least reads in the 2d datasets, but weighting isn't quite right
Affected #:  1 file

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r e04142fa31befe213b454ae87e998ca7ea46db23 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -52,11 +52,21 @@
 # instead of e's.
 _scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
 # This is the dimensions in the Cell_H file for each level
-_dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
+# It is different for different dimensionalities, so we make a list
+_dim_finder = [ \
+    re.compile(r"\(\((\d+)\) \((\d+)\) \(\d+\)\)$"),
+    re.compile(r"\(\((\d+,\d+)\) \((\d+,\d+)\) \(\d+,\d+\)\)$"),
+    re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")]
 # This is the line that prefixes each set of data for a FAB in the FAB file
-_header_pattern = re.compile(r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), " +
-                             r"\(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) " +
-                             "\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
+# It is different for different dimensionalities, so we make a list
+_endian_regex = r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), \(([0-9 ]+)\)\)\)"
+_header_pattern = [ \
+    re.compile(_endian_regex + 
+               r"\(\((\d+)\) \((\d+)\) \((\d+)\)\) (\d+)\n"),
+    re.compile(_endian_regex + 
+               r"\(\((\d+,\d+)\) \((\d+,\d+)\) \((\d+,\d+)\)\) (\d+)\n"),
+    re.compile(_endian_regex + 
+               r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")]
 
 
 
@@ -141,6 +151,7 @@
 
         GridGeometryHandler.__init__(self, pf, data_style)
         self._cache_endianness(self.grids[-1])
+
         #self._read_particles()
 
     def _parse_hierarchy(self):
@@ -149,31 +160,50 @@
         """
         self.max_level = self.parameter_file._max_level
         header_file = open(self.header_filename,'r')
-        # We can now skip to the point in the file we want to start parsing at.
+
+        self.dimensionality = self.parameter_file.dimensionality
+        _our_dim_finder = _dim_finder[self.dimensionality-1]
+        DRE = self.parameter_file.domain_right_edge # shortcut
+        DLE = self.parameter_file.domain_left_edge # shortcut
+
+        # We can now skip to the point in the file we want to start parsing.
         header_file.seek(self.parameter_file._header_mesh_start)
 
         dx = []
         for i in range(self.max_level + 1):
             dx.append([float(v) for v in header_file.next().split()])
+            # account for non-3d data sets
+            if self.dimensionality < 2:
+                dx[i].append(DRE[1] - DLE[1])
+            if self.dimensionality < 3:
+                dx[i].append(DRE[2] - DLE[2])
         self.level_dds = np.array(dx, dtype="float64")
         if int(header_file.next()) != 0:
             raise RunTimeError("yt only supports cartesian coordinates.")
         if int(header_file.next()) != 0:
             raise RunTimeError("INTERNAL ERROR! This should be a zero.")
 
-        # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
+        # each level is one group with ngrids in it;
+        # each grid has self.dimensionality lines of 2 reals
         self.grids = []
         grid_counter = 0
         for level in range(self.max_level + 1):
             vals = header_file.next().split()
-            # should this be grid_time or level_time??
-            lev, ngrids, grid_time = int(vals[0]),int(vals[1]),float(vals[2])
+            lev, ngrids, cur_time = int(vals[0]),int(vals[1]),float(vals[2])
             assert(lev == level)
             nsteps = int(header_file.next())
             for gi in range(ngrids):
                 xlo, xhi = [float(v) for v in header_file.next().split()]
-                ylo, yhi = [float(v) for v in header_file.next().split()]
-                zlo, zhi = [float(v) for v in header_file.next().split()]
+                if self.dimensionality > 1:
+                    ylo, yhi = [float(v) for v in header_file.next().split()]
+                else:
+#                    ylo, yhi = DLE[1], DRE[1]
+                    ylo, yhi = 0.0, 1.0
+                if self.dimensionality > 2:
+                    zlo, zhi = [float(v) for v in header_file.next().split()]
+                else:
+#                    zlo, zhi = DLE[2], DRE[2]
+                    zlo, zhi = 0.0, 1.0
                 self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
                 self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
             # Now we get to the level header filename, which we open and parse.
@@ -181,14 +211,13 @@
                               header_file.next().strip())
             level_header_file = open(fn + "_H")
             level_dir = os.path.dirname(fn)
-            # We skip the first two lines, although they probably contain
-            # useful information I don't know how to decipher.
+            # We skip the first two lines, which contain BoxLib header file
+            # version and 'how' the data was written
             level_header_file.next()
             level_header_file.next()
-            # Now we get the number of data files
+            # Now we get the number of components
             ncomp_this_file = int(level_header_file.next())
-            # Skip the next line, and we should get the number of grids / FABs
-            # in this file
+            # Skip the next line, which contains the number of ghost zones
             level_header_file.next()
             # To decipher this next line, we expect something like:
             # (8 0
@@ -197,7 +226,10 @@
             # Now we can iterate over each and get the indices.
             for gi in range(ngrids):
                 # components within it
-                start, stop = _dim_finder.match(level_header_file.next()).groups()
+                start, stop = _our_dim_finder.match(level_header_file.next()).groups()
+                # fix for non-3d data
+                start += ',0'*(3-self.dimensionality)
+                stop  += ',1'*(3-self.dimensionality)
                 start = np.array(start.split(","), dtype="int64")
                 stop = np.array(stop.split(","), dtype="int64")
                 dims = stop - start + 1
@@ -206,8 +238,7 @@
             # Now we read two more lines.  The first of these is a close
             # parenthesis.
             level_header_file.next()
-            # This line I'm not 100% sure of, but it's either number of grids
-            # or number of FABfiles.
+            # The next is again the number of grids
             level_header_file.next()
             # Now we iterate over grids to find their offsets in each file.
             for gi in range(ngrids):
@@ -235,7 +266,7 @@
             header = f.readline()
         
         bpr, endian, start, stop, centering, nc = \
-            _header_pattern.search(header).groups()
+            _header_pattern[self.dimensionality-1].search(header).groups()
         # Note that previously we were using a different value for BPR than we
         # use now.  Here is an example set of information directly from BoxLib:
         #  * DOUBLE data
@@ -494,8 +525,8 @@
                            for i in range(n_fields)]
 
         self.dimensionality = int(header_file.readline())
-        if self.dimensionality != 3:
-            raise RunTimeError("Boxlib 1D and 2D support not currently available.")
+#        if self.dimensionality != 3:
+#            raise RunTimeError("Boxlib 1D and 2D support not currently available.")
         self.current_time = float(header_file.readline())
         # This is traditionally a hierarchy attribute, so we will set it, but
         # in a slightly hidden variable.
@@ -546,6 +577,40 @@
         header_file.readline()
         self._header_mesh_start = header_file.tell()
 
+        # overrides for 1/2-dimensional data
+        if self.dimensionality == 1: 
+            self._setup1d()
+        elif self.dimensionality == 2: 
+            self._setup2d()
+
+    def _setup1d(self):
+#        self._hierarchy_class = BoxlibHierarchy1D
+#        self._fieldinfo_fallback = Orion1DFieldInfo
+        self.domain_left_edge = \
+            np.concatenate([self.domain_left_edge, [0.0, 0.0]])
+        self.domain_right_edge = \
+            np.concatenate([self.domain_right_edge, [1.0, 1.0]])
+        tmp = self.domain_dimensions.tolist()
+        tmp.extend((1,1))
+        self.domain_dimensions = np.array(tmp)
+        tmp = list(self.periodicity)
+        tmp[1:] = False
+        self.periodicity = ensure_tuple(tmp)
+        
+    def _setup2d(self):
+#        self._hierarchy_class = BoxlibHierarchy2D
+#        self._fieldinfo_fallback = Orion2DFieldInfo
+        self.domain_left_edge = \
+            np.concatenate([self.domain_left_edge, [0.0]])
+        self.domain_right_edge = \
+            np.concatenate([self.domain_right_edge, [1.0]])
+        tmp = self.domain_dimensions.tolist()
+        tmp.append(1)
+        self.domain_dimensions = np.array(tmp)
+        tmp = list(self.periodicity)
+        tmp[2] = False
+        self.periodicity = ensure_tuple(tmp)
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
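
To illustrate the dimensionality-indexed patterns introduced above, here is
a sketch of the 2D member of _dim_finder applied to an invented Cell_H box
line:

    import re

    two_d = re.compile(r"\(\((\d+,\d+)\) \((\d+,\d+)\) \(\d+,\d+\)\)$")
    start, stop = two_d.match("((0,0) (127,63) (0,0))").groups()
    # start == "0,0", stop == "127,63"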


https://bitbucket.org/yt_analysis/yt/commits/fe4cb143777c/
Changeset:   fe4cb143777c
Branch:      yt-3.0
User:        ChrisMalone
Date:        2013-11-21 23:15:03
Summary:     the culprit was the grid_dimensions definition
Affected #:  1 file

diff -r e04142fa31befe213b454ae87e998ca7ea46db23 -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -197,12 +197,10 @@
                 if self.dimensionality > 1:
                     ylo, yhi = [float(v) for v in header_file.next().split()]
                 else:
-#                    ylo, yhi = DLE[1], DRE[1]
                     ylo, yhi = 0.0, 1.0
                 if self.dimensionality > 2:
                     zlo, zhi = [float(v) for v in header_file.next().split()]
                 else:
-#                    zlo, zhi = DLE[2], DRE[2]
                     zlo, zhi = 0.0, 1.0
                 self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
                 self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
@@ -227,9 +225,10 @@
             for gi in range(ngrids):
                 # components within it
                 start, stop = _our_dim_finder.match(level_header_file.next()).groups()
-                # fix for non-3d data
+                # fix for non-3d data 
+                # note we append '0' to both ends b/c of the '+1' in dims below
                 start += ',0'*(3-self.dimensionality)
-                stop  += ',1'*(3-self.dimensionality)
+                stop  += ',0'*(3-self.dimensionality)
                 start = np.array(start.split(","), dtype="int64")
                 stop = np.array(stop.split(","), dtype="int64")
                 dims = stop - start + 1
@@ -525,8 +524,6 @@
                            for i in range(n_fields)]
 
         self.dimensionality = int(header_file.readline())
-#        if self.dimensionality != 3:
-#            raise RunTimeError("Boxlib 1D and 2D support not currently available.")
         self.current_time = float(header_file.readline())
         # This is traditionally a hierarchy attribute, so we will set it, but
         # in a slightly hidden variable.
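
A small worked sketch of the fix above, using an invented 2D grid: padding
both start and stop with ',0' makes each extra axis come out one cell thick
after the '+1':

    import numpy as np

    start, stop = "0,0", "127,63"
    start += ',0' * (3 - 2)   # pad up to three components
    stop  += ',0' * (3 - 2)
    start = np.array(start.split(","), dtype="int64")
    stop = np.array(stop.split(","), dtype="int64")
    # stop - start + 1 == [128, 64, 1]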


https://bitbucket.org/yt_analysis/yt/commits/a788d5a847d9/
Changeset:   a788d5a847d9
Branch:      yt-3.0
User:        ChrisMalone
Date:        2013-11-21 23:24:21
Summary:     Merged yt_analysis/yt-3.0 into yt-3.0
Affected #:  66 files

diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
 include distribute_setup.py README* CREDITS COPYING.txt CITATION
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE kdtree2-README
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there!  You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses.  It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there!  You've just downloaded yt, an analysis tool for scientific
+datasets generated on a variety of data platforms.  It's written in
+Python and heavily leverages both NumPy and Matplotlib for fast arrays and
+visualization, respectively.
 
 Full documentation and a user community can be found at:
 
 http://yt-project.org/
+
 http://yt-project.org/doc/
 
 If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
 doc/install_script.sh .  You will have to set the destination directory, and
 there are options available, but it should be straightforward.
 
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or 
+ways to help development, please visit our website.
 
 Enjoy!

diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
 get_ytdata xray_emissivity.h5
 
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
@@ -918,6 +923,8 @@
 do_setup_py $SYMPY
 [ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
+( ${DEST_DIR}/bin/pip install jinja2 2>&1 ) 1>> ${LOG_FILE}
+
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then

diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -86,6 +86,10 @@
     #Empty fit without any lines
     yFit = na.ones(len(fluxData))
 
+    #Force the first and last flux pixels to be 1 to prevent out-of-bounds errors
+    fluxData[0]=1
+    fluxData[-1]=1
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -120,9 +124,10 @@
                     z,fitLim,minError*(b[2]-b[1]),speciesDict)
 
             #Check existence of partner lines if applicable
-            newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                    b, minError*(b[2]-b[1]),
-                    x0, xRes, speciesDict)
+            if len(speciesDict['wavelength']) != 1:
+                newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
+                        b, minError*(b[2]-b[1]),
+                        x0, xRes, speciesDict)
 
             #If flagged as a bad fit, species is lyman alpha,
             #   and it may be a saturated line, use special tools
@@ -548,6 +553,10 @@
         #Index of the redshifted wavelength
         indexRedWl = (redWl-x0)/xRes
 
+        #Check that the redshifted wavelength is even within the flux range
+        if indexRedWl > len(y):
+            return False
+
         #Check if surpasses minimum absorption bound
         if y[int(indexRedWl)]>fluxMin:
             return False

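For context, a minimal standalone sketch of the new out-of-bounds guard above;
the function name and toy arguments are illustrative assumptions, not part of
the changeset:

    import numpy as np

    def line_in_flux_range(redWl, x0, xRes, y, fluxMin):
        # Index of the redshifted wavelength on the flux grid
        indexRedWl = (redWl - x0) / xRes
        # New guard: reject lines that fall past the end of the flux array
        if indexRedWl > len(y):
            return False
        # Existing check: the line must dip below the minimum absorption bound
        return y[int(indexRedWl)] <= fluxMin
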
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,16 @@
 from .radmc3d_export.api import \
     RadMC3DWriter
 
+from .particle_trajectories.api import \
+    ParticleTrajectories
+
+from .photon_simulator.api import \
+     PhotonList, \
+     EventList, \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel, \
+     PhotonModel, \
+     ThermalPhotonModel

diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
         cosmology_splice = []
-
+ 
+        if near_redshift == far_redshift:
+            self.simulation.get_time_series(redshifts=[near_redshift])
+            cosmology_splice.append({'time': self.simulation[0].current_time,
+                                     'redshift': self.simulation[0].current_redshift,
+                                     'filename': os.path.join(self.simulation[0].fullpath,
+                                                              self.simulation[0].basename),
+                                     'next': None})
+            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+                       (cosmology_splice[0]['filename'], near_redshift))
+            return cosmology_splice
+        
         # Use minimum number of datasets to go from z_i to z_f.
         if minimal:
 

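A hedged usage sketch of the equal-redshift case added above; the dataset path
and the CosmologySplice constructor arguments are assumptions for illustration:

    >>> from yt.analysis_modules.cosmological_observation.cosmology_splice \
    ...     import CosmologySplice
    >>> cs = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
    >>> splice = cs.create_cosmology_splice(0.1, 0.1)  # near == far
    >>> len(splice)   # a single entry, with 'next' set to None
    1
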
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
     only_on_root, \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import \
+     speed_of_light_cgs, \
+     cm_per_km
 
 class LightRay(CosmologySplice):
     """
@@ -51,7 +54,9 @@
     near_redshift : float
         The near (lowest) redshift for the light ray.
     far_redshift : float
-        The far (highest) redshift for the light ray.
+        The far (highest) redshift for the light ray.  NOTE: in order 
+        to use only a single dataset in a light ray, set the 
+        near_redshift and far_redshift to be the same.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
@@ -111,65 +116,92 @@
                                        time_data=time_data,
                                        redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, filename=None):
+    def _calculate_light_ray_solution(self, seed=None, 
+                                      start_position=None, end_position=None,
+                                      trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
         np.random.seed(seed)
 
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
+        # If using only one dataset, set start and stop manually.
+        if start_position is not None:
+            if len(self.light_ray_solution) > 1:
+                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+            if not ((end_position is None) ^ (trajectory is None)):
+                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if end_position is not None:
+                self.light_ray_solution[0]['end'] = np.array(end_position)
+            else:
+                # assume trajectory given as r, theta, phi
+                if len(trajectory) != 3:
+                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
+                r, theta, phi = trajectory
+                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+                  r * np.array([np.cos(phi) * np.sin(theta),
+                                np.sin(phi) * np.sin(theta),
+                                np.cos(theta)])
+            self.light_ray_solution[0]['traversal_box_fraction'] = \
+              vector_length(self.light_ray_solution[0]['start'], 
+                            self.light_ray_solution[0]['end'])
 
-        for q in range(len(self.light_ray_solution)):
-            if (q == len(self.light_ray_solution) - 1):
-                z_next = self.near_redshift
-            else:
-                z_next = self.light_ray_solution[q+1]['redshift']
+        # the normal way (random start positions and trajectories for each dataset)
+        else:
+            
+            # For box coherence, keep track of effective depth travelled.
+            box_fraction_used = 0.0
 
-            # Calculate fraction of box required for a depth of delta z
-            self.light_ray_solution[q]['traversal_box_fraction'] = \
-                self.cosmology.ComovingRadialDistance(\
-                z_next, self.light_ray_solution[q]['redshift']) * \
-                self.simulation.hubble_constant / \
-                self.simulation.box_size
+            for q in range(len(self.light_ray_solution)):
+                if (q == len(self.light_ray_solution) - 1):
+                    z_next = self.near_redshift
+                else:
+                    z_next = self.light_ray_solution[q+1]['redshift']
 
-            # Simple error check to make sure more than 100% of box depth
-            # is never required.
-            if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_ray_solution[q]['redshift'], z_next,
-                             self.light_ray_solution[q]['traversal_box_fraction']))
-                mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_ray_solution[q]['deltazMax'],
-                             self.light_ray_solution[q]['redshift']-z_next))
+                # Calculate fraction of box required for a depth of delta z
+                self.light_ray_solution[q]['traversal_box_fraction'] = \
+                    self.cosmology.ComovingRadialDistance(\
+                    z_next, self.light_ray_solution[q]['redshift']) * \
+                    self.simulation.hubble_constant / \
+                    self.simulation.box_size
 
-            # Get dataset axis and center.
-            # If using box coherence, only get start point and vector if
-            # enough of the box has been used,
-            # or if box_fraction_used will be greater than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used >
-                     self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                # Random start point
-                self.light_ray_solution[q]['start'] = np.random.random(3)
-                theta = np.pi * np.random.random()
-                phi = 2 * np.pi * np.random.random()
-                box_fraction_used = 0.0
-            else:
-                # Use end point of previous segment and same theta and phi.
-                self.light_ray_solution[q]['start'] = \
-                  self.light_ray_solution[q-1]['end'][:]
+                # Simple error check to make sure more than 100% of box depth
+                # is never required.
+                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+                                (self.light_ray_solution[q]['redshift'], z_next,
+                                 self.light_ray_solution[q]['traversal_box_fraction']))
+                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+                                (self.light_ray_solution[q]['deltazMax'],
+                                 self.light_ray_solution[q]['redshift']-z_next))
 
-            self.light_ray_solution[q]['end'] = \
-              self.light_ray_solution[q]['start'] + \
-                self.light_ray_solution[q]['traversal_box_fraction'] * \
-                np.array([np.cos(phi) * np.sin(theta),
-                          np.sin(phi) * np.sin(theta),
-                          np.cos(theta)])
-            box_fraction_used += \
-              self.light_ray_solution[q]['traversal_box_fraction']
+                # Get dataset axis and center.
+                # If using box coherence, only get start point and vector if
+                # enough of the box has been used,
+                # or if box_fraction_used will be greater than 1 after this slice.
+                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+                        (box_fraction_used >
+                         self.minimum_coherent_box_fraction) or \
+                        (box_fraction_used +
+                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    # Random start point
+                    self.light_ray_solution[q]['start'] = np.random.random(3)
+                    theta = np.pi * np.random.random()
+                    phi = 2 * np.pi * np.random.random()
+                    box_fraction_used = 0.0
+                else:
+                    # Use end point of previous segment and same theta and phi.
+                    self.light_ray_solution[q]['start'] = \
+                      self.light_ray_solution[q-1]['end'][:]
+
+                self.light_ray_solution[q]['end'] = \
+                  self.light_ray_solution[q]['start'] + \
+                    self.light_ray_solution[q]['traversal_box_fraction'] * \
+                    np.array([np.cos(phi) * np.sin(theta),
+                              np.sin(phi) * np.sin(theta),
+                              np.cos(theta)])
+                box_fraction_used += \
+                  self.light_ray_solution[q]['traversal_box_fraction']
 
         if filename is not None:
             self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None, fields=None,
+    def make_light_ray(self, seed=None,
+                       start_position=None, end_position=None,
+                       trajectory=None,
+                       fields=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=False,
                        get_nearest_halo=False,
@@ -197,6 +232,19 @@
         seed : int
             Seed for the random number generator.
             Default: None.
+        start_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the starting position of the ray.
+            Default: None.
+        end_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the ending position of the ray.
+            Default: None.
+        trajectory : list of floats
+            Used only if creating a light ray from a single dataset.
+            The (r, theta, phi) direction of the light ray.  Use either
+            end_position or trajectory, not both.
+            Default: None.
         fields : list
             A list of fields for which to get data.
             Default: None.
@@ -313,7 +361,11 @@
             nearest_halo_fields = []
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+        self._calculate_light_ray_solution(seed=seed, 
+                                           start_position=start_position, 
+                                           end_position=end_position,
+                                           trajectory=trajectory,
+                                           filename=solution_filename)
 
         # Initialize data structures.
         self._data = {}
@@ -335,9 +387,18 @@
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
                                                        njobs=njobs, dynamic=dynamic):
-            mylog.info("Creating ray segment at z = %f." %
-                       my_segment['redshift'])
-            if my_segment['next'] is None:
+
+            # Load dataset for segment.
+            pf = load(my_segment['filename'])
+
+            if self.near_redshift == self.far_redshift:
+                h_vel = cm_per_km * pf.units['mpc'] * \
+                  vector_length(my_segment['start'], my_segment['end']) * \
+                  self.cosmology.HubbleConstantNow * \
+                  self.cosmology.ExpansionFactor(my_segment['redshift'])
+                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+            elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
                 next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Load dataset for segment.
-            pf = load(my_segment['filename'])
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
 

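Putting the new make_light_ray() keywords together, a hedged usage sketch of
the single-dataset mode; the dataset path, simulation type, and the LightRay
constructor signature are assumptions, not part of this changeset:

    >>> import numpy as np
    >>> from yt.analysis_modules.cosmological_observation.light_ray.light_ray \
    ...     import LightRay
    >>> lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
    ...               near_redshift=0.0, far_redshift=0.0)  # equal z: one dataset
    >>> lr.make_light_ray(start_position=[0.1, 0.2, 0.3],
    ...                   trajectory=[0.5, np.pi/3, np.pi/4],  # (r, theta, phi)
    ...                   solution_filename="ray_solution.txt",
    ...                   data_filename="ray.h5")
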
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectories

diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,329 @@
+"""
+Particle trajectories
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectories object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g. leaving the simulation domain or being actively
+    destroyed), the whole trajectory collection of which it is a set must end
+    at or before the particle's last timestep. This is a limitation we hope to
+    lift at some point in the future.     
+    """
+    def __init__(self, filenames, indices, fields=None) :
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None: fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first interpolate the field to the particle positions
+        # and then return the field. 
+
+        pf = self.pfs[0]
+        self.derived_field_list = pf.h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if pf.field_info[field].particle_type]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each dataset, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs:
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+                print "Not all requested particle ids contained in this dataset!"
+                raise IndexError
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = np.array(self.times)
+
+        # Now instantiate the requested fields 
+        for field in fields:
+            self._get_data(field)
+            
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+        if key == "particle_time":
+            return self.times
+        if not self.field_data.has_key(key):
+            self._get_data(key)
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        for field in fields:
+            if not self.field_data.has_key(field):
+                self._get_data(field)
+                
+    def _get_data(self, field):
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        if not self.field_data.has_key(field):
+            particles = np.empty((0))
+            step = int(0)
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+                if field in self.particle_fields:
+                    # This is easy... just get the particle fields
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = np.append(particles, pfield[sort])
+                else:
+                    # This is hard... must loop over grids
+                    pfield = np.zeros((self.num_indices))
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    for grid in particle_grids:
+                        cube = grid.retrieve_ghost_zones(1, [field])
+                        CICSample_3(x,y,z,pfield,
+                                    self.num_indices,
+                                    cube[field],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    np.float64(grid['dx']))
+                    particles = np.append(particles, pfield)
+                step += 1
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print "The particle index %d is not in the list!" % (index)
+            raise IndexError
+        fields = [field for field in sorted(self.field_data.keys())]
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = [field for field in sorted(self.field_data.keys())]
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in xrange(self.num_indices):
+            outlines = [first_str]
+            for it in xrange(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+            
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fields = [field for field in sorted(self.field_data.keys())]
+        fid.create_dataset("particle_indices", dtype=np.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        for field in fields:
+            fid.create_dataset("%s" % field, data=self[field])
+        fid.close()

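The per-dataset bookkeeping in ParticleTrajectories.__init__ above (one boolean
mask plus one argsort per parameter file) can be seen in isolation with toy
arrays; everything here is illustrative:

    >>> import numpy as np
    >>> indices = np.array([3, 7, 11])            # particle ids to track
    >>> newtags = np.array([11, 2, 7, 3, 5])      # ids as stored in one file
    >>> mask = np.in1d(newtags, indices, assume_unique=True)
    >>> sort = np.argsort(newtags[mask])          # puts 11, 7, 3 into id order
    >>> newtags[mask][sort]
    array([ 3,  7, 11])
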
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('particle_trajectories', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/photon_simulator/api.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.analysis_modules.photon_simulator.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .photon_models import \
+     PhotonModel, \
+     ThermalPhotonModel
+
+from .photon_simulator import \
+     PhotonList, \
+     EventList
+
+from .spectral_models import \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel

diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/photon_simulator/photon_models.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -0,0 +1,205 @@
+"""
+Classes for specific photon models
+
+The algorithms used here are based off of the method used by the
+PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
+developed by Veronica Biffi and Klaus Dolag. References for
+PHOX may be found at:
+
+Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545
+http://adsabs.harvard.edu/abs/2012MNRAS.420.3545B
+
+Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395
+http://adsabs.harvard.edu/abs/2013MNRAS.428.1395B
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import *
+from yt.utilities.physical_constants import \
+     mp, cm_per_km, K_per_keV, cm_per_mpc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system
+
+N_TBIN = 10000
+TMIN = 8.08e-2
+TMAX = 50.
+
+comm = communication_system.communicators[-1]
+
+class PhotonModel(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, data_source, parameters):
+        photons = {}
+        return photons
+
+class ThermalPhotonModel(PhotonModel):
+    r"""
+    Initialize a ThermalPhotonModel from a thermal spectrum. 
+    
+    Parameters
+    ----------
+
+    spectral_model : `SpectralModel`
+        A thermal spectral model instance, either of `XSpecThermalModel`
+        or `TableApecModel`. 
+    X_H : float, optional
+        The hydrogen mass fraction.
+    Zmet : float or string, optional
+        The metallicity. If a float, assumes a constant metallicity throughout.
+        If a string, is taken to be the name of the metallicity field.
+    """
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+        self.X_H = X_H
+        self.Zmet = Zmet
+        self.spectral_model = spectral_model
+
+    def __call__(self, data_source, parameters):
+        
+        pf = data_source.pf
+
+        exp_time = parameters["FiducialExposureTime"]
+        area = parameters["FiducialArea"]
+        redshift = parameters["FiducialRedshift"]
+        D_A = parameters["FiducialAngularDiameterDistance"]*cm_per_mpc
+        dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3)
+                
+        vol_scale = pf.units["cm"]**(-3)/np.prod(pf.domain_width)
+        
+        num_cells = data_source["Temperature"].shape[0]
+        start_c = comm.rank*num_cells/comm.size
+        end_c = (comm.rank+1)*num_cells/comm.size
+        
+        kT = data_source["Temperature"][start_c:end_c].copy()/K_per_keV
+        vol = data_source["CellVolume"][start_c:end_c].copy()
+        dx = data_source["dx"][start_c:end_c].copy()
+        EM = (data_source["Density"][start_c:end_c].copy()/mp)**2
+        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+    
+        data_source.clear_data()
+    
+        x = data_source["x"][start_c:end_c].copy()
+        y = data_source["y"][start_c:end_c].copy()
+        z = data_source["z"][start_c:end_c].copy()
+    
+        data_source.clear_data()
+        
+        vx = data_source["x-velocity"][start_c:end_c].copy()
+        vy = data_source["y-velocity"][start_c:end_c].copy()
+        vz = data_source["z-velocity"][start_c:end_c].copy()
+    
+        if isinstance(self.Zmet, basestring):
+            metalZ = data_source[self.Zmet][start_c:end_c].copy()
+        else:
+            metalZ = self.Zmet*np.ones(EM.shape)
+        
+        data_source.clear_data()
+
+        idxs = np.argsort(kT)
+        dshape = idxs.shape
+
+        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
+        dkT = kT_bins[1]-kT_bins[0]
+        kT_idxs = np.digitize(kT[idxs], kT_bins)
+        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
+        bcounts = np.bincount(kT_idxs).astype("int")
+        bcounts = bcounts[bcounts > 0]
+        n = int(0)
+        bcell = []
+        ecell = []
+        for bcount in bcounts:
+            bcell.append(n)
+            ecell.append(n+bcount)
+            n += bcount
+        kT_idxs = np.unique(kT_idxs)
+        
+        self.spectral_model.prepare()
+        energy = self.spectral_model.ebins
+    
+        cell_em = EM[idxs]*vol_scale
+        cell_vol = vol[idxs]*vol_scale
+    
+        number_of_photons = np.zeros(dshape, dtype='uint64')
+        energies = []
+    
+        u = np.random.random(cell_em.shape)
+        
+        pbar = get_pbar("Generating Photons", dshape[0])
+
+        for i, ikT in enumerate(kT_idxs):
+
+            ncells = int(bcounts[i])
+            ibegin = bcell[i]
+            iend = ecell[i]
+            kT = kT_bins[ikT] + 0.5*dkT
+        
+            em_sum_c = cell_em[ibegin:iend].sum()
+            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+            
+            cspec, mspec = self.spectral_model.get_spectrum(kT)
+            cspec *= dist_fac*em_sum_c/vol_scale
+            mspec *= dist_fac*em_sum_m/vol_scale
+        
+            cumspec_c = np.cumsum(cspec)
+            counts_c = cumspec_c[:]/cumspec_c[-1]
+            counts_c = np.insert(counts_c, 0, 0.0)
+            tot_ph_c = cumspec_c[-1]*area*exp_time
+
+            cumspec_m = np.cumsum(mspec)
+            counts_m = cumspec_m[:]/cumspec_m[-1]
+            counts_m = np.insert(counts_m, 0, 0.0)
+            tot_ph_m = cumspec_m[-1]*area*exp_time
+        
+            for icell in xrange(ibegin, iend):
+            
+                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+            
+                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+            
+                cell_n = cell_n_c + cell_n_m
+
+                if cell_n > 0:
+                    number_of_photons[icell] = cell_n
+                    randvec_c = np.random.uniform(size=cell_n_c)
+                    randvec_c.sort()
+                    randvec_m = np.random.uniform(size=cell_n_m)
+                    randvec_m.sort()
+                    cell_e_c = np.interp(randvec_c, counts_c, energy)
+                    cell_e_m = np.interp(randvec_m, counts_m, energy)
+                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
+                
+                pbar.update(icell)
+
+        pbar.finish()
+            
+        active_cells = number_of_photons > 0
+        idxs = idxs[active_cells]
+        
+        photons = {}
+
+        src_ctr = parameters["center"]
+        
+        photons["x"] = (x[idxs]-src_ctr[0])*pf.units["kpc"]
+        photons["y"] = (y[idxs]-src_ctr[1])*pf.units["kpc"]
+        photons["z"] = (z[idxs]-src_ctr[2])*pf.units["kpc"]
+        photons["vx"] = vx[idxs]/cm_per_km
+        photons["vy"] = vy[idxs]/cm_per_km
+        photons["vz"] = vz[idxs]/cm_per_km
+        photons["dx"] = dx[idxs]*pf.units["kpc"]
+        photons["NumberOfPhotons"] = number_of_photons[active_cells]
+        photons["Energy"] = np.concatenate(energies)
+    
+        return photons

This diff is so big that we needed to truncate the remainder.
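A hedged usage sketch for the new photon_simulator API; the APEC table path and
the TableApecModel argument list are assumptions based on the docstrings, not
part of this changeset:

    >>> from yt.analysis_modules.api import TableApecModel, ThermalPhotonModel
    >>> spec_model = TableApecModel("atomdb_v2.0.2", 0.01, 20.0, 20000)
    >>> thermal_model = ThermalPhotonModel(spec_model, X_H=0.75, Zmet=0.3)
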

https://bitbucket.org/yt_analysis/yt/commits/6e029bcb0dbf/
Changeset:   6e029bcb0dbf
Branch:      yt-3.0
User:        xarthisius
Date:        2013-11-25 11:41:08
Summary:     Add setup for yt/extern/progressbar. Fixes issue #732
Affected #:  1 file

diff -r a788d5a847d925ef1d57f639db889ca273b651e0 -r 6e029bcb0dbf8680278737524cce19e3365cbea1 yt/extern/setup.py
--- /dev/null
+++ b/yt/extern/setup.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('extern', parent_package, top_path)
+    config.add_subpackage("progressbar")
+    config.make_config_py()
+    return config


https://bitbucket.org/yt_analysis/yt/commits/3ecddcf90d39/
Changeset:   3ecddcf90d39
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-25 22:03:33
Summary:     Removing duplicated Hexahedral Mesh code.  Speed up hexahedral_connectivity.
Affected #:  2 files

diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r 3ecddcf90d39dcb55ed7bb16cbc28076b326f536 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -861,145 +861,6 @@
 
     return spf
 
-def hexahedral_connectivity(xgrid, ygrid, zgrid):
-    nx = len(xgrid)
-    ny = len(ygrid)
-    nz = len(zgrid)
-    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
-                    dtype=np.float64, count = nx*ny*nz*3)
-    coords.shape = (nx*ny*nz, 3)
-    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
-                    dtype=np.int64, count = 8*3)
-    cis.shape = (8, 3)
-    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
-                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
-    cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
-    off = cis + cycle[:, np.newaxis]
-    connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
-    return coords, connectivity
-
-class StreamHexahedralMesh(SemiStructuredMesh):
-    _connectivity_length = 8
-    _index_offset = 0
-
-class StreamHexahedralHierarchy(UnstructuredGeometryHandler):
-
-    def __init__(self, pf, data_style = None):
-        self.stream_handler = pf.stream_handler
-        super(StreamHexahedralHierarchy, self).__init__(pf, data_style)
-
-    def _initialize_mesh(self):
-        coords = self.stream_handler.fields.pop('coordinates')
-        connec = self.stream_handler.fields.pop('connectivity')
-        self.meshes = [StreamHexahedralMesh(0,
-          self.hierarchy_filename, connec, coords, self)]
-
-    def _setup_data_io(self):
-        if self.stream_handler.io is not None:
-            self.io = self.stream_handler.io
-        else:
-            self.io = io_registry[self.data_style](self.pf)
-
-    def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
-
-class StreamHexahedralStaticOutput(StreamStaticOutput):
-    _hierarchy_class = StreamHexahedralHierarchy
-    _fieldinfo_fallback = StreamFieldInfo
-    _fieldinfo_known = KnownStreamFields
-    _data_style = "stream_hexahedral"
-
-def load_hexahedral_mesh(data, connectivity, coordinates,
-                         sim_unit_to_cm, bbox=None,
-                         sim_time=0.0, periodicity=(True, True, True)):
-    r"""Load a hexahedral mesh of data into yt as a
-    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
-
-    This should allow a semistructured grid of data to be loaded directly into
-    yt and analyzed as would any others.  This comes with several caveats:
-        * Units will be incorrect unless the data has already been converted to
-          cgs.
-        * Some functions may behave oddly, and parallelism will be
-          disappointing or non-existent in most cases.
-        * Particles may be difficult to integrate.
-
-    Particle fields are detected as one-dimensional fields. The number of particles
-    is set by the "number_of_particles" key in data.
-    
-    Parameters
-    ----------
-    data : dict
-        This is a dict of numpy arrays, where the keys are the field names.
-        There must only be one.
-    connectivity : array_like
-        This should be of size (N,8) where N is the number of zones.
-    coordinates : array_like
-        This should be of size (M,3) where M is the number of vertices
-        indicated in the connectivity matrix.
-    sim_unit_to_cm : float
-        Conversion factor from simulation units to centimeters
-    bbox : array_like (xdim:zdim, LE:RE), optional
-        Size of computational domain in units sim_unit_to_cm
-    sim_time : float, optional
-        The simulation time in seconds
-    periodicity : tuple of booleans
-        Determines whether the data will be treated as periodic along
-        each axis
-
-    """
-
-    domain_dimensions = np.ones(3, "int32") * 2
-    nprocs = 1
-    if bbox is None:
-        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
-    domain_left_edge = np.array(bbox[:, 0], 'float64')
-    domain_right_edge = np.array(bbox[:, 1], 'float64')
-    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
-
-    sfh = StreamDictFieldHandler()
-    
-    particle_types = set_particle_types(data)
-    
-    sfh.update({'connectivity': connectivity,
-                'coordinates': coordinates,
-                0: data})
-    grid_left_edges = domain_left_edge
-    grid_right_edges = domain_right_edge
-    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
-
-    # I'm not sure we need any of this.
-    handler = StreamHandler(
-        grid_left_edges,
-        grid_right_edges,
-        grid_dimensions,
-        grid_levels,
-        -np.ones(nprocs, dtype='int64'),
-        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
-        np.zeros(nprocs).reshape((nprocs,1)),
-        sfh,
-        particle_types=particle_types,
-        periodicity=periodicity
-    )
-
-    handler.name = "HexahedralMeshData"
-    handler.domain_left_edge = domain_left_edge
-    handler.domain_right_edge = domain_right_edge
-    handler.refine_by = 2
-    handler.dimensionality = 3
-    handler.domain_dimensions = domain_dimensions
-    handler.simulation_time = sim_time
-    handler.cosmology_simulation = 0
-
-    spf = StreamHexahedralStaticOutput(handler)
-    spf.units["cm"] = sim_unit_to_cm
-    spf.units['1'] = 1.0
-    spf.units["unitary"] = 1.0
-    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
-    for unit in mpc_conversion.keys():
-        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
-
-    return spf
-
 class StreamOctreeSubset(OctreeSubset):
     domain_id = 1
     _domain_offset = 1
@@ -1190,20 +1051,22 @@
 
     return spf
 
+_cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                dtype=np.int64, count = 8*3)
+_cis.shape = (8, 3)
+
 def hexahedral_connectivity(xgrid, ygrid, zgrid):
     nx = len(xgrid)
     ny = len(ygrid)
     nz = len(zgrid)
-    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
-                    dtype=np.float64, count = nx*ny*nz*3)
-    coords.shape = (nx*ny*nz, 3)
-    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
-                    dtype=np.int64, count = 8*3)
-    cis.shape = (8, 3)
-    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
-                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
+    coords = np.zeros((nx, ny, nz, 3), dtype="float64", order="C")
+    coords[:,:,:,0] = xgrid[:,None,None]
+    coords[:,:,:,1] = ygrid[None,:,None]
+    coords[:,:,:,2] = zgrid[None,None,:]
+    coords.shape = (nx * ny * nz, 3)
+    cycle = np.rollaxis(np.indices((nx-1,ny-1,nz-1)), 0, 4)
     cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
-    off = cis + cycle[:, np.newaxis]
+    off = _cis + cycle[:, np.newaxis]
     connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
     return coords, connectivity
 

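The speedup above comes from replacing the Python-level itertools product with
NumPy broadcasting when filling the vertex coordinates; a standalone sketch
with toy grids (grid sizes here are arbitrary):

    import numpy as np

    xgrid = ygrid = zgrid = np.linspace(0.0, 1.0, 3)
    nx, ny, nz = len(xgrid), len(ygrid), len(zgrid)
    coords = np.zeros((nx, ny, nz, 3), dtype="float64", order="C")
    coords[:,:,:,0] = xgrid[:,None,None]   # broadcast x along y and z
    coords[:,:,:,1] = ygrid[None,:,None]   # broadcast y along x and z
    coords[:,:,:,2] = zgrid[None,None,:]   # broadcast z along x and y
    coords.shape = (nx * ny * nz, 3)       # same flat layout as before
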
diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r 3ecddcf90d39dcb55ed7bb16cbc28076b326f536 yt/utilities/lib/mesh_utilities.pyx
--- a/yt/utilities/lib/mesh_utilities.pyx
+++ b/yt/utilities/lib/mesh_utilities.pyx
@@ -76,7 +76,7 @@
 def smallest_fwidth(np.ndarray[np.float64_t, ndim=2] coords,
                     np.ndarray[np.int64_t, ndim=2] indices,
                     int offset = 0):
-    cdef np.float64_t fwidth = 1e60
+    cdef np.float64_t fwidth = 1e60, pos
     cdef int nc = indices.shape[0]
     cdef int nv = indices.shape[1]
     if nv != 8:


https://bitbucket.org/yt_analysis/yt/commits/d0546b049240/
Changeset:   d0546b049240
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-27 20:06:12
Summary:     Add a double-check for Tipsy datasets.
Affected #:  1 file

diff -r 3ecddcf90d39dcb55ed7bb16cbc28076b326f536 -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -503,7 +503,19 @@
             self.periodicity = (True, True, True)
         else:
             self.periodicity = (False, False, False)
-
+        tot = sum(self.parameters[ptype] for ptype
+                  in ('nsph', 'ndark', 'nstar'))
+        if tot != self.parameters['nbodies']:
+            print "SOMETHING HAS GONE WRONG.  NBODIES != SUM PARTICLES."
+            print "%s != (%s == %s + %s + %s)" % (
+                self.parameters['nbodies'],
+                tot,
+                self.parameters['nsph'],
+                self.parameters['ndark'],
+                self.parameters['nstar'])
+            print "Often this can be fixed by changing the 'endian' parameter."
+            print "This defaults to '>' but may in fact be '<'."
+            raise RuntimeError
         if self.parameters.get('bComove', True):
             self.cosmological_simulation = 1
             cosm = self._cosmology_parameters or {}

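Restating the new Tipsy sanity check outside of the class for clarity; the toy
parameter dict is an assumption:

    # Per-type counts from the Tipsy header must sum to nbodies; if not,
    # the byte order was probably guessed wrong ('>' by default, try '<').
    params = {'nbodies': 100, 'nsph': 60, 'ndark': 30, 'nstar': 10}
    tot = sum(params[ptype] for ptype in ('nsph', 'ndark', 'nstar'))
    if tot != params['nbodies']:
        raise RuntimeError("nbodies != nsph + ndark + nstar; "
                           "try the opposite 'endian' setting")
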

https://bitbucket.org/yt_analysis/yt/commits/ce44c24bd28d/
Changeset:   ce44c24bd28d
Branch:      yt-3.0
User:        brittonsmith
Date:        2013-11-28 15:47:31
Summary:     Merged in yt_analysis/yt-3.0/yt-3.0 (pull request #654)

This just brings the latest set of changes into the main yt repo.
Affected #:  72 files

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
 include distribute_setup.py README* CREDITS COPYING.txt CITATION
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE kdtree2-README
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there!  You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses.  It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there!  You've just downloaded yt, an analysis tool for scientific
+datasets, generated on a variety of data platforms.  It's written in 
+python and heavily leverages both NumPy and Matplotlib for fast arrays and 
+visualization, respectively.
 
 Full documentation and a user community can be found at:
 
 http://yt-project.org/
+
 http://yt-project.org/doc/
 
 If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
 doc/install_script.sh .  You will have to set the destination directory, and
 there are options available, but it should be straightforward.
 
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or 
+ways to help development, please visit our website.
 
 Enjoy!

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
 get_ytdata xray_emissivity.h5
 
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
@@ -918,6 +923,8 @@
 do_setup_py $SYMPY
 [ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
+( ${DEST_DIR}/bin/pip install jinja2 2>&1 ) 1>> ${LOG_FILE}
+
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -86,6 +86,10 @@
     #Empty fit without any lines
     yFit = na.ones(len(fluxData))
 
+    #Force the first and last flux pixel to be 1 to prevent OOB
+    fluxData[0]=1
+    fluxData[-1]=1
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -120,9 +124,10 @@
                     z,fitLim,minError*(b[2]-b[1]),speciesDict)
 
             #Check existence of partner lines if applicable
-            newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                    b, minError*(b[2]-b[1]),
-                    x0, xRes, speciesDict)
+            if len(speciesDict['wavelength']) != 1:
+                newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
+                        b, minError*(b[2]-b[1]),
+                        x0, xRes, speciesDict)
 
             #If flagged as a bad fit, species is lyman alpha,
             #   and it may be a saturated line, use special tools
@@ -548,6 +553,10 @@
         #Index of the redshifted wavelength
         indexRedWl = (redWl-x0)/xRes
 
+        #Check that the redshifted wavelength is even within the flux range
+        if indexRedWl > len(y):
+            return False
+
         #Check if surpasses minimum absorption bound
         if y[int(indexRedWl)]>fluxMin:
             return False

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,16 @@
 from .radmc3d_export.api import \
     RadMC3DWriter
 
+from .particle_trajectories.api import \
+    ParticleTrajectories
+
+from .photon_simulator.api import \
+     PhotonList, \
+     EventList, \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel, \
+     PhotonModel, \
+     ThermalPhotonModel

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
         cosmology_splice = []
-
+ 
+        if near_redshift == far_redshift:
+            self.simulation.get_time_series(redshifts=[near_redshift])
+            cosmology_splice.append({'time': self.simulation[0].current_time,
+                                     'redshift': self.simulation[0].current_redshift,
+                                     'filename': os.path.join(self.simulation[0].fullpath,
+                                                              self.simulation[0].basename),
+                                     'next': None})
+            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+                       (cosmology_splice[0]['filename'], near_redshift))
+            return cosmology_splice
+        
         # Use minimum number of datasets to go from z_i to z_f.
         if minimal:
 

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
     only_on_root, \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import \
+     speed_of_light_cgs, \
+     cm_per_km
 
 class LightRay(CosmologySplice):
     """
@@ -51,7 +54,9 @@
     near_redshift : float
         The near (lowest) redshift for the light ray.
     far_redshift : float
-        The far (highest) redshift for the light ray.
+        The far (highest) redshift for the light ray.  NOTE: in order 
+        to use only a single dataset in a light ray, set the 
+        near_redshift and far_redshift to be the same.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
@@ -111,65 +116,92 @@
                                        time_data=time_data,
                                        redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, filename=None):
+    def _calculate_light_ray_solution(self, seed=None, 
+                                      start_position=None, end_position=None,
+                                      trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
         np.random.seed(seed)
 
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
+        # If using only one dataset, set start and stop manually.
+        if start_position is not None:
+            if len(self.light_ray_solution) > 1:
+                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+            if not ((end_position is None) ^ (trajectory is None)):
+                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if end_position is not None:
+                self.light_ray_solution[0]['end'] = np.array(end_position)
+            else:
+                # assume trajectory given as r, theta, phi
+                if len(trajectory) != 3:
+                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
+                r, theta, phi = trajectory
+                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+                  r * np.array([np.cos(phi) * np.sin(theta),
+                                np.sin(phi) * np.sin(theta),
+                                np.cos(theta)])
+            self.light_ray_solution[0]['traversal_box_fraction'] = \
+              vector_length(self.light_ray_solution[0]['start'], 
+                            self.light_ray_solution[0]['end'])
 
-        for q in range(len(self.light_ray_solution)):
-            if (q == len(self.light_ray_solution) - 1):
-                z_next = self.near_redshift
-            else:
-                z_next = self.light_ray_solution[q+1]['redshift']
+        # the normal way (random start positions and trajectories for each dataset)
+        else:
+            
+            # For box coherence, keep track of effective depth travelled.
+            box_fraction_used = 0.0
 
-            # Calculate fraction of box required for a depth of delta z
-            self.light_ray_solution[q]['traversal_box_fraction'] = \
-                self.cosmology.ComovingRadialDistance(\
-                z_next, self.light_ray_solution[q]['redshift']) * \
-                self.simulation.hubble_constant / \
-                self.simulation.box_size
+            for q in range(len(self.light_ray_solution)):
+                if (q == len(self.light_ray_solution) - 1):
+                    z_next = self.near_redshift
+                else:
+                    z_next = self.light_ray_solution[q+1]['redshift']
 
-            # Simple error check to make sure more than 100% of box depth
-            # is never required.
-            if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_ray_solution[q]['redshift'], z_next,
-                             self.light_ray_solution[q]['traversal_box_fraction']))
-                mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_ray_solution[q]['deltazMax'],
-                             self.light_ray_solution[q]['redshift']-z_next))
+                # Calculate fraction of box required for a depth of delta z
+                self.light_ray_solution[q]['traversal_box_fraction'] = \
+                    self.cosmology.ComovingRadialDistance(\
+                    z_next, self.light_ray_solution[q]['redshift']) * \
+                    self.simulation.hubble_constant / \
+                    self.simulation.box_size
 
-            # Get dataset axis and center.
-            # If using box coherence, only get start point and vector if
-            # enough of the box has been used,
-            # or if box_fraction_used will be greater than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used >
-                     self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                # Random start point
-                self.light_ray_solution[q]['start'] = np.random.random(3)
-                theta = np.pi * np.random.random()
-                phi = 2 * np.pi * np.random.random()
-                box_fraction_used = 0.0
-            else:
-                # Use end point of previous segment and same theta and phi.
-                self.light_ray_solution[q]['start'] = \
-                  self.light_ray_solution[q-1]['end'][:]
+                # Simple error check to make sure more than 100% of box depth
+                # is never required.
+                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+                                (self.light_ray_solution[q]['redshift'], z_next,
+                                 self.light_ray_solution[q]['traversal_box_fraction']))
+                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+                                (self.light_ray_solution[q]['deltazMax'],
+                                 self.light_ray_solution[q]['redshift']-z_next))
 
-            self.light_ray_solution[q]['end'] = \
-              self.light_ray_solution[q]['start'] + \
-                self.light_ray_solution[q]['traversal_box_fraction'] * \
-                np.array([np.cos(phi) * np.sin(theta),
-                          np.sin(phi) * np.sin(theta),
-                          np.cos(theta)])
-            box_fraction_used += \
-              self.light_ray_solution[q]['traversal_box_fraction']
+                # Get dataset axis and center.
+                # If using box coherence, only get start point and vector if
+                # enough of the box has been used,
+                # or if box_fraction_used will be greater than 1 after this slice.
+                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+                        (box_fraction_used >
+                         self.minimum_coherent_box_fraction) or \
+                        (box_fraction_used +
+                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    # Random start point
+                    self.light_ray_solution[q]['start'] = np.random.random(3)
+                    theta = np.pi * np.random.random()
+                    phi = 2 * np.pi * np.random.random()
+                    box_fraction_used = 0.0
+                else:
+                    # Use end point of previous segment and same theta and phi.
+                    self.light_ray_solution[q]['start'] = \
+                      self.light_ray_solution[q-1]['end'][:]
+
+                self.light_ray_solution[q]['end'] = \
+                  self.light_ray_solution[q]['start'] + \
+                    self.light_ray_solution[q]['traversal_box_fraction'] * \
+                    np.array([np.cos(phi) * np.sin(theta),
+                              np.sin(phi) * np.sin(theta),
+                              np.cos(theta)])
+                box_fraction_used += \
+                  self.light_ray_solution[q]['traversal_box_fraction']
 
         if filename is not None:
             self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None, fields=None,
+    def make_light_ray(self, seed=None,
+                       start_position=None, end_position=None,
+                       trajectory=None,
+                       fields=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=False,
                        get_nearest_halo=False,
@@ -197,6 +232,19 @@
         seed : int
             Seed for the random number generator.
             Default: None.
+        start_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the starting position of the ray.
+            Default: None.
+        end_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the ending position of the ray.
+            Default: None.
+        trajectory : list of floats
+            Used only if creating a light ray from a single dataset.
+            The (r, theta, phi) direction of the light ray.  Use either
+            end_position or trajectory, not both.
+            Default: None.
         fields : list
             A list of fields for which to get data.
             Default: None.
@@ -313,7 +361,11 @@
             nearest_halo_fields = []
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+        self._calculate_light_ray_solution(seed=seed, 
+                                           start_position=start_position, 
+                                           end_position=end_position,
+                                           trajectory=trajectory,
+                                           filename=solution_filename)
 
         # Initialize data structures.
         self._data = {}
@@ -335,9 +387,18 @@
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
                                                        njobs=njobs, dynamic=dynamic):
-            mylog.info("Creating ray segment at z = %f." %
-                       my_segment['redshift'])
-            if my_segment['next'] is None:
+
+            # Load dataset for segment.
+            pf = load(my_segment['filename'])
+
+            if self.near_redshift == self.far_redshift:
+                h_vel = cm_per_km * pf.units['mpc'] * \
+                  vector_length(my_segment['start'], my_segment['end']) * \
+                  self.cosmology.HubbleConstantNow * \
+                  self.cosmology.ExpansionFactor(my_segment['redshift'])
+                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+            elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
                 next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Load dataset for segment.
-            pf = load(my_segment['filename'])
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
 

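Putting the new make_light_ray() parameters together, a single-dataset ray
might look like the sketch below; the constructor arguments, file names, and
field names are illustrative rather than taken from this changeset:

    from yt.analysis_modules.cosmological_observation.api import LightRay

    # Equal near/far redshifts select the single-dataset code path above.
    lr = LightRay("enzo_simulation.par", "Enzo",
                  near_redshift=0.05, far_redshift=0.05)

    # Either give an explicit end point...
    lr.make_light_ray(start_position=[0.1, 0.2, 0.3],
                      end_position=[0.9, 0.8, 0.7],
                      fields=["Density", "Temperature"],
                      data_filename="single_ray.h5")

    # ...or a (r, theta, phi) trajectory, but not both:
    #lr.make_light_ray(start_position=[0.1, 0.2, 0.3],
    #                  trajectory=[0.5, 1.0, 2.0])
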
diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectories

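Note that the import above relies on Python 2's implicit relative imports; the
explicit form, which would also survive a Python 3 migration, is:

    from .particle_trajectories import ParticleTrajectories
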
diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,329 @@
+"""
+Particle trajectories
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectories object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
+    >>> for t in trajs:
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time, only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g. leaving the simulation domain or being actively
+    destroyed), the whole trajectory collection of which it is a member must
+    end at or before the particle's last timestep. This is a limitation we
+    hope to lift at some point in the future.
+    """
+    def __init__(self, filenames, indices, fields=None) :
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None: fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first interpolate the field to the particle positions
+        # and then return the field. 
+
+        pf = self.pfs[0]
+        self.derived_field_list = pf.h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if pf.field_info[field].particle_type]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each dataset, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs:
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+                print "Not all requested particle ids contained in this dataset!"
+                raise IndexError
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = np.array(self.times)
+
+        # Now instantiate the requested fields 
+        for field in fields:
+            self._get_data(field)
+            
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+        if key == "particle_time":
+            return self.times
+        if not self.field_data.has_key(key):
+            self._get_data(key)
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        for field in fields:
+            if not self.field_data.has_key(field):
+                self._get_data(field)
+                
+    def _get_data(self, field):
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        if not self.field_data.has_key(field):
+            particles = np.empty((0))
+            step = int(0)
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+                if field in self.particle_fields:
+                    # This is easy... just get the particle fields
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = np.append(particles, pfield[sort])
+                else:
+                    # This is hard... must loop over grids
+                    pfield = np.zeros((self.num_indices))
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    for grid in particle_grids:
+                        cube = grid.retrieve_ghost_zones(1, [field])
+                        CICSample_3(x,y,z,pfield,
+                                    self.num_indices,
+                                    cube[field],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    np.float64(grid['dx']))
+                    particles = np.append(particles, pfield)
+                step += 1
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print "The particle index %d is not in the list!" % (index)
+            raise IndexError
+        fields = [field for field in sorted(self.field_data.keys())]
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the outputted ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = [field for field in sorted(self.field_data.keys())]
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in xrange(self.num_indices):
+            outlines = [first_str]
+            for it in xrange(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+            
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fields = [field for field in sorted(self.field_data.keys())]
+        fid.create_dataset("particle_indices", dtype=np.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        for field in fields:
+            fid.create_dataset("%s" % field, data=self[field])
+        fid.close()

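As a quick check of the on-disk layout, the HDF5 file produced by
write_out_h5() can be read back directly; the dataset names follow the code
above, while the filename is whatever was passed in:

    import h5py

    f = h5py.File("orbit_trajectories", "r")
    indices = f["particle_indices"][:]
    times = f["particle_time"][:]
    x = f["particle_position_x"][:]  # shape (num_indices, num_steps)
    f.close()
    print x.shape, indices.shape, times.shape
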
diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('particle_trajectories', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/photon_simulator/api.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.analysis_modules.photon_simulator.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .photon_models import \
+     PhotonModel, \
+     ThermalPhotonModel
+
+from .photon_simulator import \
+     PhotonList, \
+     EventList
+
+from .spectral_models import \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel

diff -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 -r ce44c24bd28d3d79f7c2d87091902edabbf00bac yt/analysis_modules/photon_simulator/photon_models.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -0,0 +1,205 @@
+"""
+Classes for specific photon models
+
+The algorithms used here are based off of the method used by the
+PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
+developed by Veronica Biffi and Klaus Dolag. References for
+PHOX may be found at:
+
+Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545
+http://adsabs.harvard.edu/abs/2012MNRAS.420.3545B
+
+Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395
+http://adsabs.harvard.edu/abs/2013MNRAS.428.1395B
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import *
+from yt.utilities.physical_constants import \
+     mp, cm_per_km, K_per_keV, cm_per_mpc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system
+
+N_TBIN = 10000
+TMIN = 8.08e-2
+TMAX = 50.
+
+comm = communication_system.communicators[-1]
+
+class PhotonModel(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, data_source, parameters):
+        photons = {}
+        return photons
+
+class ThermalPhotonModel(PhotonModel):
+    r"""
+    Initialize a ThermalPhotonModel from a thermal spectrum. 
+    
+    Parameters
+    ----------
+
+    spectral_model : `SpectralModel`
+        A thermal spectral model instance, either of `XSpecThermalModel`
+        or `TableApecModel`. 
+    X_H : float, optional
+        The hydrogen mass fraction.
+    Zmet : float or string, optional
+        The metallicity. If a float, assumes a constant metallicity throughout.
+        If a string, is taken to be the name of the metallicity field.
+    """
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+        self.X_H = X_H
+        self.Zmet = Zmet
+        self.spectral_model = spectral_model
+
+    def __call__(self, data_source, parameters):
+        
+        pf = data_source.pf
+
+        exp_time = parameters["FiducialExposureTime"]
+        area = parameters["FiducialArea"]
+        redshift = parameters["FiducialRedshift"]
+        D_A = parameters["FiducialAngularDiameterDistance"]*cm_per_mpc
+        dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3)
+                
+        vol_scale = pf.units["cm"]**(-3)/np.prod(pf.domain_width)
+        
+        num_cells = data_source["Temperature"].shape[0]
+        start_c = comm.rank*num_cells/comm.size
+        end_c = (comm.rank+1)*num_cells/comm.size
+        
+        kT = data_source["Temperature"][start_c:end_c].copy()/K_per_keV
+        vol = data_source["CellVolume"][start_c:end_c].copy()
+        dx = data_source["dx"][start_c:end_c].copy()
+        EM = (data_source["Density"][start_c:end_c].copy()/mp)**2
+        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+    
+        data_source.clear_data()
+    
+        x = data_source["x"][start_c:end_c].copy()
+        y = data_source["y"][start_c:end_c].copy()
+        z = data_source["z"][start_c:end_c].copy()
+    
+        data_source.clear_data()
+        
+        vx = data_source["x-velocity"][start_c:end_c].copy()
+        vy = data_source["y-velocity"][start_c:end_c].copy()
+        vz = data_source["z-velocity"][start_c:end_c].copy()
+    
+        if isinstance(self.Zmet, basestring):
+            metalZ = data_source[self.Zmet][start_c:end_c].copy()
+        else:
+            metalZ = self.Zmet*np.ones(EM.shape)
+        
+        data_source.clear_data()
+
+        idxs = np.argsort(kT)
+        dshape = idxs.shape
+
+        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
+        dkT = kT_bins[1]-kT_bins[0]
+        kT_idxs = np.digitize(kT[idxs], kT_bins)
+        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
+        bcounts = np.bincount(kT_idxs).astype("int")
+        bcounts = bcounts[bcounts > 0]
+        n = int(0)
+        bcell = []
+        ecell = []
+        for bcount in bcounts:
+            bcell.append(n)
+            ecell.append(n+bcount)
+            n += bcount
+        kT_idxs = np.unique(kT_idxs)
+        
+        self.spectral_model.prepare()
+        energy = self.spectral_model.ebins
+    
+        cell_em = EM[idxs]*vol_scale
+        cell_vol = vol[idxs]*vol_scale
+    
+        number_of_photons = np.zeros(dshape, dtype='uint64')
+        energies = []
+    
+        u = np.random.random(cell_em.shape)
+        
+        pbar = get_pbar("Generating Photons", dshape[0])
+
+        for i, ikT in enumerate(kT_idxs):
+
+            ncells = int(bcounts[i])
+            ibegin = bcell[i]
+            iend = ecell[i]
+            kT = kT_bins[ikT] + 0.5*dkT
+        
+            em_sum_c = cell_em[ibegin:iend].sum()
+            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+            
+            cspec, mspec = self.spectral_model.get_spectrum(kT)
+            cspec *= dist_fac*em_sum_c/vol_scale
+            mspec *= dist_fac*em_sum_m/vol_scale
+        
+            cumspec_c = np.cumsum(cspec)
+            counts_c = cumspec_c[:]/cumspec_c[-1]
+            counts_c = np.insert(counts_c, 0, 0.0)
+            tot_ph_c = cumspec_c[-1]*area*exp_time
+
+            cumspec_m = np.cumsum(mspec)
+            counts_m = cumspec_m[:]/cumspec_m[-1]
+            counts_m = np.insert(counts_m, 0, 0.0)
+            tot_ph_m = cumspec_m[-1]*area*exp_time
+        
+            for icell in xrange(ibegin, iend):
+            
+                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+            
+                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+            
+                cell_n = cell_n_c + cell_n_m
+
+                if cell_n > 0:
+                    number_of_photons[icell] = cell_n
+                    randvec_c = np.random.uniform(size=cell_n_c)
+                    randvec_c.sort()
+                    randvec_m = np.random.uniform(size=cell_n_m)
+                    randvec_m.sort()
+                    cell_e_c = np.interp(randvec_c, counts_c, energy)
+                    cell_e_m = np.interp(randvec_m, counts_m, energy)
+                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
+                
+                pbar.update(icell)
+
+        pbar.finish()
+            
+        active_cells = number_of_photons > 0
+        idxs = idxs[active_cells]
+        
+        photons = {}
+
+        src_ctr = parameters["center"]
+        
+        photons["x"] = (x[idxs]-src_ctr[0])*pf.units["kpc"]
+        photons["y"] = (y[idxs]-src_ctr[1])*pf.units["kpc"]
+        photons["z"] = (z[idxs]-src_ctr[2])*pf.units["kpc"]
+        photons["vx"] = vx[idxs]/cm_per_km
+        photons["vy"] = vy[idxs]/cm_per_km
+        photons["vz"] = vz[idxs]/cm_per_km
+        photons["dx"] = dx[idxs]*pf.units["kpc"]
+        photons["NumberOfPhotons"] = number_of_photons[active_cells]
+        photons["Energy"] = np.concatenate(energies)
+    
+        return photons

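The heart of the cell loop above is inverse-transform sampling: sorted uniform
deviates are pushed through the inverted cumulative spectrum with np.interp to
draw photon energies. A self-contained sketch of that step, with a toy
spectrum standing in for the APEC/XSpec models:

    import numpy as np

    energy = np.linspace(0.1, 10.0, 1001)  # spectral bin edges in keV
    spec = np.exp(-energy[:-1])            # toy per-bin spectrum
    cumspec = np.cumsum(spec)
    counts = np.insert(cumspec / cumspec[-1], 0, 0.0)  # CDF at the bin edges

    n_photons = 1000
    randvec = np.sort(np.random.uniform(size=n_photons))
    photon_energies = np.interp(randvec, counts, energy)
    print photon_energies.min(), photon_energies.max()
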
This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/
