[yt-svn] commit/yt: 9 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri Jul 6 16:08:07 PDT 2012


9 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/cf5da37386f7/
changeset:   cf5da37386f7
branch:      yt
user:        Andrew Myers
date:        2012-06-21 00:02:13
summary:     allowing refine_by to vary with level
affected #:  3 files

diff -r 61acaf1be42b7e1923dea3f1441bbe0c21c58443 -r cf5da37386f733e9ce11e6faa1643e30ecc775f3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2381,7 +2381,7 @@
     def _get_data_from_grid(self, grid, fields, dls):
         g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
-        ref_ratio = self.pf.refine_by**(self.level - grid.Level)
+        ref_ratio = self.pf.refine_by[level]**(self.level - grid.Level)
         FillBuffer(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
             c_fields, g_fields, 
@@ -3463,7 +3463,7 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
-        ref_ratio = self.pf.refine_by**(self.level - grid.Level)
+        ref_ratio = self.pf.refine_by[self.level]**(self.level - grid.Level)
         g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
@@ -3475,7 +3475,7 @@
 
     def _flush_data_to_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
-        ref_ratio = self.pf.refine_by**(self.level - grid.Level)
+        ref_ratio = self.pf.refine_by[level]**(self.level - grid.Level)
         g_fields = []
         for field in fields:
             if not grid.has_key(field): grid[field] = \
@@ -3569,7 +3569,7 @@
         if self._use_pbar: pbar.finish()
 
     def _update_level_state(self, level, fields = None):
-        dx = self._base_dx / self.pf.refine_by**level
+        dx = self._base_dx / self.pf.refine_by[level]**level
         self.field_data['cdx'] = dx[0]
         self.field_data['cdy'] = dx[1]
         self.field_data['cdz'] = dx[2]
@@ -3595,7 +3595,7 @@
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, fields):
-        rf = float(self.pf.refine_by**dlevel)
+        rf = float(self.pf.refine_by[dlevel]**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
         dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')


diff -r 61acaf1be42b7e1923dea3f1441bbe0c21c58443 -r cf5da37386f733e9ce11e6faa1643e30ecc775f3 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -227,6 +227,7 @@
         fileh = h5py.File(filename,'r')
         self.current_time = fileh.attrs['time']
         self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
         


diff -r 61acaf1be42b7e1923dea3f1441bbe0c21c58443 -r cf5da37386f733e9ce11e6faa1643e30ecc775f3 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -171,6 +171,6 @@
 
 for pf in _particle_field_list:
     pfunc = particle_func("particle_%s" % (pf))
-    add_orion_field("particle_%s" % pf, function=pfunc,
-                    validators = [ValidateSpatial(0)],
-                    particle_type=True)
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
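
For context: the data_containers.py hunks above start indexing refine_by by level, i.e. they treat the refinement factor as something that can differ from one AMR level to the next rather than a single integer. A minimal sketch of what a per-level ratio means for cell spacing, using hypothetical names (base_dx, ratios) instead of yt's actual attributes:

    import numpy as np

    def dx_at_level(base_dx, ratios, level):
        """Cell spacing at `level` when each level may refine by a different factor.

        base_dx -- spacing of the level-0 grid (3-vector)
        ratios  -- ratios[l] is the refinement factor between level l and level l + 1
        """
        factor = 1
        for l in range(level):
            factor *= ratios[l]
        return np.asarray(base_dx, dtype="float64") / factor

    # refine by 4 between levels 0-1 and by 2 between levels 1-2:
    print(dx_at_level([1.0, 1.0, 1.0], ratios=[4, 2], level=2))   # [0.125 0.125 0.125]

With a constant ratio r this reduces to base_dx / r**level, which is the single-integer form the code returns to two changesets later.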



https://bitbucket.org/yt_analysis/yt/changeset/03abcaadc4ae/
changeset:   03abcaadc4ae
branch:      yt
user:        Andrew Myers
date:        2012-06-21 00:15:04
summary:     merging in changes from tip
affected #:  169 files
Diff too large to display.

https://bitbucket.org/yt_analysis/yt/changeset/29def121a9af/
changeset:   29def121a9af
branch:      yt
user:        Andrew Myers
date:        2012-06-21 05:35:12
summary:     fixing a floating point precision bug in AMRSmoothedCoveringGrid, and removing all traces of the attempt to make refine_by vary with level
affected #:  3 files

diff -r 03abcaadc4ae132989f0f84413a0850f1b837739 -r 29def121a9af35b77ba1680930dc30b61793c7c9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2385,7 +2385,7 @@
     def _get_data_from_grid(self, grid, fields, dls):
         g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
-        ref_ratio = self.pf.refine_by[level]**(self.level - grid.Level)
+        ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         FillBuffer(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
             c_fields, g_fields, 
@@ -3519,7 +3519,7 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
-        ref_ratio = self.pf.refine_by[self.level]**(self.level - grid.Level)
+        ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
@@ -3531,7 +3531,7 @@
 
     def _flush_data_to_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
-        ref_ratio = self.pf.refine_by[level]**(self.level - grid.Level)
+        ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         g_fields = []
         for field in fields:
             if not grid.has_key(field): grid[field] = \
@@ -3638,7 +3638,7 @@
         if self._use_pbar: pbar.finish()
 
     def _update_level_state(self, level, fields = None):
-        dx = self._base_dx / self.pf.refine_by[level]**level
+        dx = self._base_dx / self.pf.refine_by**level
         self.field_data['cdx'] = dx[0]
         self.field_data['cdy'] = dx[1]
         self.field_data['cdz'] = dx[2]
@@ -3649,7 +3649,7 @@
                     self.pf.domain_left_edge)/dx).astype('int64')
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
+            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
@@ -3657,19 +3657,18 @@
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
-            idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
+            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, fields):
-        rf = float(self.pf.refine_by[dlevel]**dlevel)
+        rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
         dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.right_edge-self.left_edge)/dx+0.5).astype('int32') + 2
-
+        output_dims = na.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
 
         for field in fields:
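
A likely reading of the "floating point precision" part of this changeset: expressions like na.rint(width/dx + 0.5) sit essentially on a rounding tie when the width is a whole number of cells, so a one-ulp difference between right_edge - left_edge and ActiveDimensions * self.dds can shift the computed dimensions by a cell. A tiny NumPy illustration of that sensitivity (not yt code):

    import numpy as np

    # rint rounds ties to even, so a value meant to be exactly N + 0.5 is only
    # one ulp away from flipping to the next integer.
    print(np.rint(16.5))                       # 16.0
    print(np.rint(np.nextafter(16.5, 17.0)))   # 17.0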


diff -r 03abcaadc4ae132989f0f84413a0850f1b837739 -r 29def121a9af35b77ba1680930dc30b61793c7c9 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -367,10 +367,7 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        try:
-            rf = self.pf.refine_by[child.Level-1]
-        except TypeError:
-            rf = self.pf.refine_by
+        rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
         startIndex = na.maximum(0, cgi / rf - gi)
         endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,


diff -r 03abcaadc4ae132989f0f84413a0850f1b837739 -r 29def121a9af35b77ba1680930dc30b61793c7c9 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -85,7 +85,7 @@
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
             na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by[self.Level-1]).astype('int64').ravel()
+        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
     def _setup_dx(self):
@@ -93,7 +93,7 @@
         # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by[self.Level-1]
+            self.dds = self.Parent[0].dds / self.pf.refine_by
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
@@ -284,10 +284,8 @@
             self.domain_right_edge = self.__calc_right_edge()
             self.domain_dimensions = self.__calc_domain_dimensions()
             self.dimensionality = 3
-            self.refine_by = []
             fileh = h5py.File(self.parameter_filename,'r')
-            for level in range(0,fileh.attrs['num_levels']):
-                self.refine_by.append(fileh['/level_'+str(level)].attrs['ref_ratio'])
+            self.refine_by = fileh['/level_0'].attrs['ref_ratio']
 
     def _parse_pluto_file(self, ini_filename):
         """



https://bitbucket.org/yt_analysis/yt/changeset/16fb2445d8d9/
changeset:   16fb2445d8d9
branch:      yt
user:        Andrew Myers
date:        2012-07-05 22:35:18
summary:     merging
affected #:  190 files
Diff too large to display.

https://bitbucket.org/yt_analysis/yt/changeset/878fb4469b65/
changeset:   878fb4469b65
branch:      yt
user:        Andrew Myers
date:        2012-07-06 00:58:49
summary:     adding a backwards Euler integrator for streamlines
affected #:  1 file

diff -r 16fb2445d8d9ddd3da99417eca90d721509cf670 -r 878fb4469b655a24530853c07ad2d79a654edaba yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -137,6 +137,48 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
+    def integrate_streamline_BE(self, pos, np.float64_t h, mag):
+        cdef np.float64_t cmag[1]
+        cdef np.float64_t k1[3]
+        cdef np.float64_t newpos[3], oldpos[3], guess[3]
+        cdef np.float64_t tol, err, 
+        cdef np.int32_t iter, maxiter
+
+        tol = 0.01 * h
+        maxiter = 100
+
+        for i in range(3):
+            newpos[i] = oldpos[i] = guess[i] = pos[i]
+
+        err = 1.e300
+        iter = 0
+        while ( (err > tol) and (iter < maxiter) ):
+
+            for i in range(3):
+                guess[i] = newpos[i]
+            self.get_vector_field(guess, k1, cmag)
+
+            for i in range(3):
+                newpos[i] = oldpos[i] + k1[i]*h 
+
+            err = 0.
+            for i in range(3):
+                err += (newpos[i]-guess[i])**2.0
+            err = sqrt(err)
+            iter += 1
+
+        for i in range(3):
+            pos[i] = newpos[i]
+
+        if mag is not None:
+            for i in range(3):
+                newpos[i] = pos[i]
+            self.get_vector_field(newpos, k1, cmag)
+            mag[0] = cmag[0]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def integrate_streamline(self, pos, np.float64_t h, mag):
         cdef np.float64_t cmag[1]
         cdef np.float64_t k1[3], k2[3], k3[3], k4[3]
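
The new integrate_streamline_BE above is an implicit (backward) Euler step, x_new = x_old + h * v(x_new), solved by fixed-point iteration until successive guesses agree to within 0.01 * h. A minimal NumPy sketch of the same loop (hypothetical helper; field is any callable returning the vector field at a point):

    import numpy as np

    def backward_euler_step(pos, h, field, tol_frac=0.01, maxiter=100):
        """One backward-Euler step, mirroring the fixed-point loop above."""
        pos = np.asarray(pos, dtype="float64")
        tol = tol_frac * h
        new = pos.copy()
        for _ in range(maxiter):
            guess = new
            new = pos + h * field(guess)      # x_new = x_old + h * v(guess)
            if np.linalg.norm(new - guess) <= tol:
                break
        return new

    # example with a simple rotational field:
    print(backward_euler_step([1.0, 0.0, 0.0], 0.1,
                              lambda x: np.array([-x[1], x[0], 0.0])))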



https://bitbucket.org/yt_analysis/yt/changeset/4ff6d4d8a819/
changeset:   4ff6d4d8a819
branch:      yt
user:        Andrew Myers
date:        2012-07-07 00:27:39
summary:     removing backwards Euler; the problem turned out to be something simpler
affected #:  1 file

diff -r 878fb4469b655a24530853c07ad2d79a654edaba -r 4ff6d4d8a81911626849c909c10029bcc659b58f yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -137,48 +137,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def integrate_streamline_BE(self, pos, np.float64_t h, mag):
-        cdef np.float64_t cmag[1]
-        cdef np.float64_t k1[3]
-        cdef np.float64_t newpos[3], oldpos[3], guess[3]
-        cdef np.float64_t tol, err, 
-        cdef np.int32_t iter, maxiter
-
-        tol = 0.01 * h
-        maxiter = 100
-
-        for i in range(3):
-            newpos[i] = oldpos[i] = guess[i] = pos[i]
-
-        err = 1.e300
-        iter = 0
-        while ( (err > tol) and (iter < maxiter) ):
-
-            for i in range(3):
-                guess[i] = newpos[i]
-            self.get_vector_field(guess, k1, cmag)
-
-            for i in range(3):
-                newpos[i] = oldpos[i] + k1[i]*h 
-
-            err = 0.
-            for i in range(3):
-                err += (newpos[i]-guess[i])**2.0
-            err = sqrt(err)
-            iter += 1
-
-        for i in range(3):
-            pos[i] = newpos[i]
-
-        if mag is not None:
-            for i in range(3):
-                newpos[i] = pos[i]
-            self.get_vector_field(newpos, k1, cmag)
-            mag[0] = cmag[0]
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     def integrate_streamline(self, pos, np.float64_t h, mag):
         cdef np.float64_t cmag[1]
         cdef np.float64_t k1[3], k2[3], k3[3], k4[3]



https://bitbucket.org/yt_analysis/yt/changeset/3902b693eb27/
changeset:   3902b693eb27
branch:      yt
user:        Andrew Myers
date:        2012-07-07 00:28:15
summary:     correcting bug in streamline integration code
affected #:  1 file

diff -r 4ff6d4d8a81911626849c909c10029bcc659b58f -r 3902b693eb276bd25e634f25d851b306009d4e13 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -204,7 +204,7 @@
 
         for i in range(3):
             ci[i] = (int)((pos[i]-self.LeftEdge[i])/c.dds[i])
-            dp[i] = (pos[i] - self.LeftEdge[i])%(c.dds[i])
+            dp[i] = (pos[i] - ci[i]*c.dds[i] - self.LeftEdge[i])/c.dds[i]
 
         cdef int offset = ci[0] * (c.dims[1] + 1) * (c.dims[2] + 1) \
                           + ci[1] * (c.dims[2] + 1) + ci[2]
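
On the one-line fix above: the old expression gave dp in physical length units (the remainder of the position within the cell), while the new one divides through by the cell width, so dp becomes the dimensionless fraction of the way through cell ci -- presumably what the downstream interpolation expects. A small standalone sketch of the corrected computation (hypothetical helper name):

    import numpy as np

    def cell_index_and_offset(pos, left_edge, dds):
        """Integer cell index and fractional offset in [0, 1) within that cell."""
        pos, left_edge, dds = (np.asarray(a, dtype="float64")
                               for a in (pos, left_edge, dds))
        ci = ((pos - left_edge) / dds).astype("int64")    # truncates like the (int) cast
        dp = (pos - ci * dds - left_edge) / dds           # matches the fixed line above
        return ci, dp

    print(cell_index_and_offset([0.37, 0.50, 0.74], [0.0, 0.0, 0.0], [0.25, 0.25, 0.25]))
    # cell indices (1, 2, 2), fractional offsets (0.48, 0.0, 0.96)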



https://bitbucket.org/yt_analysis/yt/changeset/2dbe16f26901/
changeset:   2dbe16f26901
branch:      yt
user:        Andrew Myers
date:        2012-07-07 00:29:38
summary:     merging from tip
affected #:  6 files

diff -r 3902b693eb276bd25e634f25d851b306009d4e13 -r 2dbe16f269015ed0b6d3077040e4bd522fd27ddc yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -220,9 +220,11 @@
                         break
                     my_bin_ratio *= 2
                     left_index[lixel]  = (center_bins[lixel] -
-                                          my_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
+                                          my_bin_ratio *
+                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
                     right_index[lixel] = (center_bins[lixel] +
-                                          my_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
+                                          my_bin_ratio *
+                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
                 if line['label_threshold'] is not None and \
                         column_density[lixel] >= line['label_threshold']:
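
The absorption_spectrum fix above swaps the whole width_ratio array for the single element belonging to the current line; with the full array, the bin bounds come out array-valued rather than scalar. A small illustration with made-up numbers:

    import numpy as np

    center_bins = np.array([100, 250, 400])
    width_ratio = np.array([3.0, 5.0, 2.0])
    lixel, my_bin_ratio = 1, 2

    print(center_bins[lixel] - my_bin_ratio * width_ratio)          # array: [244. 240. 246.]
    print(center_bins[lixel] - my_bin_ratio * width_ratio[lixel])   # scalar: 240.0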


diff -r 3902b693eb276bd25e634f25d851b306009d4e13 -r 2dbe16f269015ed0b6d3077040e4bd522fd27ddc yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -36,6 +36,7 @@
     suppressstreamlogging = 'False',
     loglevel = '20',
     inline = 'False',
+    numthreads = '-1',
     __withinreason = 'False',
     __parallel = 'False',
     __global_parallel_rank = '0',
@@ -52,7 +53,7 @@
     pasteboard_repo = '',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
-    hub_url = 'https://data.yt-project.org/upload',
+    hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety


diff -r 3902b693eb276bd25e634f25d851b306009d4e13 -r 2dbe16f269015ed0b6d3077040e4bd522fd27ddc yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2474,6 +2474,8 @@
     def get_data(self, fields=None, in_grids=False, force_particle_read = False):
         if self._grids == None:
             self._get_list_of_grids()
+        if len(self._grids) == 0:
+            raise YTNoDataInObjectError(self)
         points = []
         if not fields:
             fields_to_get = self.fields[:]
@@ -3584,8 +3586,8 @@
         fields : array_like, optional
             A list of fields that you'd like pre-generated for your object
 
-        Example
-        -------
+        Examples
+        --------
         cube = pf.h.covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
                                   right_edge=[1.0, 1.0, 1.0],
                                   dims=[128, 128, 128])


diff -r 3902b693eb276bd25e634f25d851b306009d4e13 -r 2dbe16f269015ed0b6d3077040e4bd522fd27ddc yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -553,3 +553,12 @@
     yield fn
     p.disable()
     p.dump_stats(fn)
+
+def get_num_threads():
+    from .config import ytcfg
+    nt = ytcfg.getint("yt","numthreads")
+    if nt < 0:
+        return os.environ.get("OMP_NUM_THREADS", 0)
+    return nt
+        
+
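
The new numthreads option (default '-1' in yt/config.py above) plus get_num_threads() let the volume renderer read its thread count from the config file, deferring to the OMP_NUM_THREADS environment variable when the option is negative; the camera methods below pick this up through their new num_threads=None defaults. A hypothetical session:

    from yt.config import ytcfg
    from yt.funcs import get_num_threads

    ytcfg.set("yt", "numthreads", "4")   # explicit setting wins over OMP_NUM_THREADS
    print(get_num_threads())             # 4
    # cam.snapshot() with num_threads=None now calls get_num_threads() internally.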


diff -r 3902b693eb276bd25e634f25d851b306009d4e13 -r 2dbe16f269015ed0b6d3077040e4bd522fd27ddc yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -98,6 +98,8 @@
             gs = getattr(pobj, attr)
         else:
             gs = getattr(pobj._data_source, attr)
+        if len(gs) == 0:
+            raise YTNoDataInObjectError(pobj)
         if hasattr(gs[0], 'proc_num'):
             # This one sort of knows about MPI, but not quite
             self._objs = [g for g in gs if g.proc_num ==


diff -r 3902b693eb276bd25e634f25d851b306009d4e13 -r 2dbe16f269015ed0b6d3077040e4bd522fd27ddc yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -49,6 +49,9 @@
     pixelize_healpix, arr_fisheye_vectors
 
 class Camera(ParallelAnalysisInterface):
+
+    _sampler_object = VolumeRenderSampler
+
     def __init__(self, center, normal_vector, width,
                  resolution, transfer_function,
                  north_vector = None, steady_north=False,
@@ -311,7 +314,7 @@
             sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
                     light_rgba=self.light_rgba)
         else:
-            sampler = VolumeRenderSampler(*args)
+            sampler = self._sampler_object(*args)
         return sampler
 
     def finalize_image(self, image):
@@ -349,7 +352,7 @@
         return self.volume.initialize_source()
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
-                 num_threads = 0):
+                 num_threads = None):
         r"""Ray-cast the camera.
 
         This method instructs the camera to take a snapshot -- i.e., call the ray
@@ -376,6 +379,8 @@
         image : array
             An (N,M,3) array of the final returned values, in float64 form.
         """
+        if num_threads is None:
+            num_threads=get_num_threads()
         image = self.new_image()
 
         args = self.get_sampler_args(image)
@@ -753,7 +758,7 @@
         return image
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
-                 num_threads = 0, clim = None):
+                 num_threads = None, clim = None):
         r"""Ray-cast the camera.
 
         This method instructs the camera to take a snapshot -- i.e., call the ray
@@ -773,6 +778,8 @@
         image : array
             An (N,M,3) array of the final returned values, in float64 form.
         """
+        if num_threads is None:
+            num_threads=get_num_threads()
         image = self.new_image()
 
         args = self.get_sampler_args(image)
@@ -1610,7 +1617,10 @@
                 write_image(im, fn)
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
-                 num_threads = 0):
+                 num_threads = None):
+
+        if num_threads is None:
+            num_threads=get_num_threads()
 
         fields = [self.field]
         resolution = self.resolution
@@ -1633,7 +1643,7 @@
 data_object_registry["projection_camera"] = ProjectionCamera
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, num_threads = 0, 
+                        field, weight = None, 
                         volume = None, no_ghost = False, interpolated = False):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
@@ -1695,7 +1705,7 @@
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
             field, weight=weight, pf=pf, volume=volume,
             no_ghost=no_ghost, interpolated=interpolated)
-    image = projcam.snapshot(num_threads=num_threads)
+    image = projcam.snapshot()
     if weight is not None:
         pf.field_info.pop("temp_weightfield")
     del projcam
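
The _sampler_object class attribute introduced above lets a Camera subclass swap in a different sampler without overriding get_sampler(): the base class now builds self._sampler_object(*args) instead of hard-coding VolumeRenderSampler. A toy, self-contained illustration of the pattern (all names below are hypothetical stand-ins):

    class DefaultSampler:                          # stands in for VolumeRenderSampler
        def __init__(self, *args):
            self.args = args

    class FancySampler(DefaultSampler):            # an alternative a subclass might want
        pass

    class BaseCamera:                              # plays the role of Camera
        _sampler_object = DefaultSampler
        def get_sampler(self, *args):
            return self._sampler_object(*args)     # mirrors the change in the diff

    class SpecialCamera(BaseCamera):               # subclass only swaps the attribute
        _sampler_object = FancySampler

    print(type(SpecialCamera().get_sampler(1, 2)).__name__)   # FancySampler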



https://bitbucket.org/yt_analysis/yt/changeset/a5b46d54d7f7/
changeset:   a5b46d54d7f7
branch:      yt
user:        samskillman
date:        2012-07-07 01:08:02
summary:     Merged in atmyers/yt (pull request #186)
affected #:  3 files

diff -r 4ef3c28ca8e75311be62036411ec72b2a5dfd321 -r a5b46d54d7f768b64c6d75ec6c504940438aaca0 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -204,7 +204,7 @@
 
         for i in range(3):
             ci[i] = (int)((pos[i]-self.LeftEdge[i])/c.dds[i])
-            dp[i] = (pos[i] - self.LeftEdge[i])%(c.dds[i])
+            dp[i] = (pos[i] - ci[i]*c.dds[i] - self.LeftEdge[i])/c.dds[i]
 
         cdef int offset = ci[0] * (c.dims[1] + 1) * (c.dims[2] + 1) \
                           + ci[1] * (c.dims[2] + 1) + ci[2]

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


