[yt-svn] commit/yt: 17 new changesets

commits-noreply at bitbucket.org
Wed Sep 23 07:49:38 PDT 2015


17 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/ad1688b183af/
Changeset:   ad1688b183af
Branch:      yt
User:        BW Keller
Date:        2015-09-03 02:18:19+00:00
Summary:     Fixed the auxiliary file loading function, did a total rewrite.  "A bug"
would be a serious understatement.  Hell is reading your own old code.
Affected #:  1 file

diff -r 54a3381ac0b10b469eb10e3d63857a508448c8b5 -r ad1688b183af55cf45596bdb74664badd239084c yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -83,30 +83,17 @@
         # either floats, ints, or doubles.  We're going to use a try-catch cascade to
         # determine the format.
         try:#ASCII
-            auxdata = np.genfromtxt(filename, skip_header=1)
+            auxdata = np.genfromtxt(filename, skip_header=0)
+            if auxdata[0] != np.sum(data_file.total_particles.values()):
+                raise IndexError
+            auxdata = auxdata[1:]
+        except IndexError:#binary/xdr
+            #l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
+            auxin = np.fromfile(filename, dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux', data_file.ds.endian+'d', np.sum(data_file.total_particles.values()))]))
+            auxdata = auxin['aux'].flatten()
             if auxdata.size != np.sum(data_file.total_particles.values()):
-                print("Error reading auxiliary tipsy file")
-                raise RuntimeError 
-        except ValueError:#binary/xdr
-            f = open(filename, 'rb')
-            l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
-            if l != np.sum(data_file.total_particles.values()):
-                print("Error reading auxiliary tipsy file")
-                raise RuntimeError
-            dtype = 'd'
-            if field in ('iord', 'igasorder', 'grp'):#These fields are integers
-                dtype = 'i'
-            try:# If we try loading doubles by default, we can catch an exception and try floats next
-                auxdata = np.array(struct.unpack(data_file.ds.endian+(l*dtype), f.read()))
-            except struct.error:
-                f.seek(4)
-                dtype = 'f'
-                try:
-                    auxdata = np.array(struct.unpack(data_file.ds.endian+(l*dtype), f.read()))
-                except struct.error: # None of the binary attempts to read succeeded
-                    print("Error reading auxiliary tipsy file")
-                    raise RuntimeError
-
+                auxin = np.fromfile(filename, dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux', data_file.ds.endian+'f', np.sum(data_file.total_particles.values()))]))
+                auxdata = auxin['aux'].flatten()
         # Use the mask to slice out the appropriate particle type data
         if mask.size == data_file.total_particles['Gas']:
             return auxdata[:data_file.total_particles['Gas']]
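
The changeset above replaces the struct-based reader with a two-step guess: try the file as ASCII (the first entry must equal the particle count), then fall back to a binary record of one double per particle, and finally to floats. A minimal standalone sketch of that cascade, assuming hypothetical filename, n_particles, and endian arguments in place of the yt data_file object:

import numpy as np

def read_aux_sketch(filename, n_particles, endian='>'):
    # Try ASCII first: the first value in the file should be the particle count.
    try:
        auxdata = np.genfromtxt(filename, skip_header=0)
        if auxdata[0] != n_particles:
            raise IndexError
        return auxdata[1:]
    except IndexError:
        # Binary/XDR: a 4-byte count followed by one double per particle,
        # falling back to 4-byte floats if the sizes do not add up.
        rec = np.dtype([('l', endian + 'i4'), ('aux', endian + 'd', n_particles)])
        auxdata = np.fromfile(filename, dtype=rec)['aux'].flatten()
        if auxdata.size != n_particles:
            rec = np.dtype([('l', endian + 'i4'), ('aux', endian + 'f', n_particles)])
            auxdata = np.fromfile(filename, dtype=rec)['aux'].flatten()
        return auxdata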


https://bitbucket.org/yt_analysis/yt/commits/d96ba6f643d0/
Changeset:   d96ba6f643d0
Branch:      yt
User:        BW Keller
Date:        2015-09-03 02:37:07+00:00
Summary:     Cleaned up the auxiliary file refactor.
Affected #:  1 file

diff -r ad1688b183af55cf45596bdb74664badd239084c -r d96ba6f643d07e95a630a883e5672c466398a874 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -88,11 +88,17 @@
                 raise IndexError
             auxdata = auxdata[1:]
         except IndexError:#binary/xdr
-            #l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
-            auxin = np.fromfile(filename, dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux', data_file.ds.endian+'d', np.sum(data_file.total_particles.values()))]))
+            auxin = np.fromfile(filename,
+                    dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
+                    data_file.ds.endian+'d',
+                    np.sum(data_file.total_particles.values()))]))
+
             auxdata = auxin['aux'].flatten()
             if auxdata.size != np.sum(data_file.total_particles.values()):
-                auxin = np.fromfile(filename, dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux', data_file.ds.endian+'f', np.sum(data_file.total_particles.values()))]))
+                auxin = np.fromfile(filename,
+                        dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
+                        data_file.ds.endian+'f',
+                        np.sum(data_file.total_particles.values()))]))
                 auxdata = auxin['aux'].flatten()
         # Use the mask to slice out the appropriate particle type data
         if mask.size == data_file.total_particles['Gas']:


https://bitbucket.org/yt_analysis/yt/commits/99a80fabd869/
Changeset:   99a80fabd869
Branch:      yt
User:        BW Keller
Date:        2015-09-03 17:36:32+00:00
Summary:     More robust version of the aux file method.  Also avoids reading the
full file multiple times.
Affected #:  1 file

diff -r d96ba6f643d07e95a630a883e5672c466398a874 -r 99a80fabd86990c0eee57367e2164b5ad72da238 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -82,24 +82,27 @@
         # files are in.  They can be either ascii or binary, and the binary files can be
         # either floats, ints, or doubles.  We're going to use a try-catch cascade to
         # determine the format.
-        try:#ASCII
-            auxdata = np.genfromtxt(filename, skip_header=0)
-            if auxdata[0] != np.sum(data_file.total_particles.values()):
-                raise IndexError
-            auxdata = auxdata[1:]
-        except IndexError:#binary/xdr
+        filesize = os.stat(filename).st_size
+        if np.fromfile(filename, dtype=np.dtype(data_file.ds.endian+'i4'), 
+                count=1) != np.sum(data_file.total_particles.values()):
+            with open(filename) as f:
+                if int(f.readline()) != np.sum(data_file.total_particles.values()):
+                    raise RuntimeError
+            auxdata = np.genfromtxt(filename, skip_header=1)
+        elif (filesize-4)/8 == np.sum(data_file.total_particles.values()):
             auxin = np.fromfile(filename,
                     dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
                     data_file.ds.endian+'d',
                     np.sum(data_file.total_particles.values()))]))
-
             auxdata = auxin['aux'].flatten()
-            if auxdata.size != np.sum(data_file.total_particles.values()):
-                auxin = np.fromfile(filename,
-                        dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
-                        data_file.ds.endian+'f',
-                        np.sum(data_file.total_particles.values()))]))
-                auxdata = auxin['aux'].flatten()
+        elif (filesize-4)/4 == np.sum(data_file.total_particles.values()):
+            auxin = np.fromfile(filename,
+                    dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
+                    data_file.ds.endian+'f',
+                    np.sum(data_file.total_particles.values()))]))
+            auxdata = auxin['aux'].flatten()
+        else:
+            raise RuntimeError
         # Use the mask to slice out the appropriate particle type data
         if mask.size == data_file.total_particles['Gas']:
             return auxdata[:data_file.total_particles['Gas']]
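
Compared with the try/except cascade, the version above decides the format up front: if the first 4 bytes do not contain the particle count the file must be ASCII (and the first text line is checked instead), otherwise the file size tells doubles from floats. A hedged sketch of the same decision tree, with hypothetical filename, n_particles, and endian arguments:

import os
import numpy as np

def detect_aux_format(filename, n_particles, endian='>'):
    """Classify a tipsy-style aux file as 'ascii', 'float64', or 'float32'."""
    filesize = os.stat(filename).st_size
    header = np.fromfile(filename, dtype=np.dtype(endian + 'i4'), count=1)
    if header.size == 0 or header[0] != n_particles:
        # No binary count header; the first text line must hold the particle count.
        with open(filename) as f:
            if int(f.readline()) != n_particles:
                raise RuntimeError("unrecognized aux file format")
        return 'ascii'
    elif (filesize - 4) / 8 == n_particles:   # 4-byte count plus one double per particle
        return 'float64'
    elif (filesize - 4) / 4 == n_particles:   # 4-byte count plus one float per particle
        return 'float32'
    raise RuntimeError("file size does not match the particle count")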


https://bitbucket.org/yt_analysis/yt/commits/52d006c91dc7/
Changeset:   52d006c91dc7
Branch:      yt
User:        xarthisius
Date:        2015-09-17 16:16:12+00:00
Summary:     Remove flake8 warnings
Affected #:  1 file

diff -r 99a80fabd86990c0eee57367e2164b5ad72da238 -r 52d006c91dc765b5c6e91daa748fce397f55245e yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -19,15 +19,13 @@
 import numpy as np
 import os
 
-from yt.geometry.oct_container import \
-    _ORDER_MAX
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 from yt.utilities.logger import ytLogger as \
     mylog
-    
+
 CHUNKSIZE = 10000000
 
 class IOHandlerTipsyBinary(BaseIOHandler):
@@ -77,13 +75,12 @@
         This method will automatically detect the format of the file.
         """
         filename = data_file.filename+'.'+field
-        dtype = None
         # We need to do some fairly ugly detection to see what format the auxiliary
         # files are in.  They can be either ascii or binary, and the binary files can be
         # either floats, ints, or doubles.  We're going to use a try-catch cascade to
         # determine the format.
         filesize = os.stat(filename).st_size
-        if np.fromfile(filename, dtype=np.dtype(data_file.ds.endian+'i4'), 
+        if np.fromfile(filename, dtype=np.dtype(data_file.ds.endian+'i4'),
                 count=1) != np.sum(data_file.total_particles.values()):
             with open(filename) as f:
                 if int(f.readline()) != np.sum(data_file.total_particles.values()):
@@ -209,12 +206,11 @@
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
                 if count == 0: continue
-                start, stop = ind, ind + count
+                stop = ind + count
                 while ind < stop:
                     c = min(CHUNKSIZE, stop - ind)
                     pp = np.fromfile(f, dtype = self._pdtypes[ptype],
                                      count = c)
-                    eps = np.finfo(pp["Coordinates"]["x"].dtype).eps
                     np.minimum(mi, [pp["Coordinates"]["x"].min(),
                                     pp["Coordinates"]["y"].min(),
                                     pp["Coordinates"]["z"].min()], mi)
@@ -238,7 +234,6 @@
                           dtype="uint64")
         ind = 0
         DLE, DRE = ds.domain_left_edge, ds.domain_right_edge
-        dx = (DRE - DLE) / (2**_ORDER_MAX)
         self.domain_left_edge = DLE.in_units("code_length").ndarray_view()
         self.domain_right_edge = DRE.in_units("code_length").ndarray_view()
         with open(data_file.filename, "rb") as f:
@@ -247,7 +242,7 @@
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
                 if count == 0: continue
-                start, stop = ind, ind + count
+                stop = ind + count
                 while ind < stop:
                     c = min(CHUNKSIZE, stop - ind)
                     pp = np.fromfile(f, dtype = self._pdtypes[ptype],
@@ -262,7 +257,6 @@
                         mas[axi] = ma
                     pos = np.empty((pp.size, 3), dtype="float64")
                     for i, ax in enumerate("xyz"):
-                        eps = np.finfo(pp["Coordinates"][ax].dtype).eps
                         pos[:,i] = pp["Coordinates"][ax]
                     regions.add_data_file(pos, data_file.file_id,
                                           data_file.ds.filter_bbox)
@@ -301,7 +295,6 @@
         # We can just look at the particle counts.
         self._header_offset = data_file.ds._header_offset
         self._pdtypes = {}
-        pds = {}
         field_list = []
         tp = data_file.total_particles
         aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have


https://bitbucket.org/yt_analysis/yt/commits/9dcd2fa7d06a/
Changeset:   9dcd2fa7d06a
Branch:      yt
User:        xarthisius
Date:        2015-09-17 16:28:24+00:00
Summary:     Minor refactor
Affected #:  1 file

diff -r 52d006c91dc765b5c6e91daa748fce397f55245e -r 9dcd2fa7d06a41ca421a9f4581f738eeba45987e yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -75,28 +75,29 @@
         This method will automatically detect the format of the file.
         """
         filename = data_file.filename+'.'+field
+        tot_parts = np.sum(data_file.total_particles.values())
         # We need to do some fairly ugly detection to see what format the auxiliary
         # files are in.  They can be either ascii or binary, and the binary files can be
         # either floats, ints, or doubles.  We're going to use a try-catch cascade to
         # determine the format.
         filesize = os.stat(filename).st_size
         if np.fromfile(filename, dtype=np.dtype(data_file.ds.endian+'i4'),
-                count=1) != np.sum(data_file.total_particles.values()):
+                count=1) != tot_parts:
             with open(filename) as f:
-                if int(f.readline()) != np.sum(data_file.total_particles.values()):
+                if int(f.readline()) != tot_parts:
                     raise RuntimeError
             auxdata = np.genfromtxt(filename, skip_header=1)
-        elif (filesize-4)/8 == np.sum(data_file.total_particles.values()):
+        elif (filesize-4)/8 == tot_parts:
             auxin = np.fromfile(filename,
                     dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
                     data_file.ds.endian+'d',
-                    np.sum(data_file.total_particles.values()))]))
+                    tot_parts)]))
             auxdata = auxin['aux'].flatten()
-        elif (filesize-4)/4 == np.sum(data_file.total_particles.values()):
+        elif (filesize-4)/4 == tot_parts:
             auxin = np.fromfile(filename,
                     dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
                     data_file.ds.endian+'f',
-                    np.sum(data_file.total_particles.values()))]))
+                    tot_parts)]))
             auxdata = auxin['aux'].flatten()
         else:
             raise RuntimeError


https://bitbucket.org/yt_analysis/yt/commits/4013dd8ac9e1/
Changeset:   4013dd8ac9e1
Branch:      yt
User:        xarthisius
Date:        2015-09-17 16:56:41+00:00
Summary:     Detect aux file format during load() rather than during actual i/o
Affected #:  1 file

diff -r 9dcd2fa7d06a41ca421a9f4581f738eeba45987e -r 4013dd8ac9e1fd5bb2a558cea1ec67deec45297e yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -74,33 +74,13 @@
         Read in auxiliary files from gasoline/pkdgrav.
         This method will automatically detect the format of the file.
         """
-        filename = data_file.filename+'.'+field
-        tot_parts = np.sum(data_file.total_particles.values())
-        # We need to do some fairly ugly detection to see what format the auxiliary
-        # files are in.  They can be either ascii or binary, and the binary files can be
-        # either floats, ints, or doubles.  We're going to use a try-catch cascade to
-        # determine the format.
-        filesize = os.stat(filename).st_size
-        if np.fromfile(filename, dtype=np.dtype(data_file.ds.endian+'i4'),
-                count=1) != tot_parts:
-            with open(filename) as f:
-                if int(f.readline()) != tot_parts:
-                    raise RuntimeError
-            auxdata = np.genfromtxt(filename, skip_header=1)
-        elif (filesize-4)/8 == tot_parts:
-            auxin = np.fromfile(filename,
-                    dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
-                    data_file.ds.endian+'d',
-                    tot_parts)]))
-            auxdata = auxin['aux'].flatten()
-        elif (filesize-4)/4 == tot_parts:
-            auxin = np.fromfile(filename,
-                    dtype=np.dtype([('l',data_file.ds.endian+'i4'), ('aux',
-                    data_file.ds.endian+'f',
-                    tot_parts)]))
+        filename = data_file.filename + '.' + field
+        if isinstance(self._aux_pdtypes[field], np.dtype):
+            auxin = np.fromfile(filename, dtype=self._aux_pdtypes[field])
             auxdata = auxin['aux'].flatten()
         else:
-            raise RuntimeError
+            auxdata = np.genfromtxt(filename, skip_header=1)
+
         # Use the mask to slice out the appropriate particle type data
         if mask.size == data_file.total_particles['Gas']:
             return auxdata[:data_file.total_particles['Gas']]
@@ -295,26 +275,49 @@
     def _create_dtypes(self, data_file):
         # We can just look at the particle counts.
         self._header_offset = data_file.ds._header_offset
-        self._pdtypes = {}
-        field_list = []
-        tp = data_file.total_particles
-        aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
-        self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
         self._pdtypes = self._compute_dtypes(data_file.ds._field_dtypes,
                                              data_file.ds.endian)
+        self._field_list = []
         for ptype, field in self._fields:
-            if tp[ptype] == 0:
+            if data_file.total_particles[ptype] == 0:
                 # We do not want out _pdtypes to have empty particles.
                 self._pdtypes.pop(ptype, None)
                 continue
-            field_list.append((ptype, field))
-        if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
-            field_list += [("Gas",a) for a in self._aux_fields]
-        if any(["DarkMatter"==f[0] for f in field_list]):
-            field_list += [("DarkMatter",a) for a in self._aux_fields]
-        if any(["Stars"==f[0] for f in field_list]):
-            field_list += [("Stars",a) for a in self._aux_fields]
-        self._field_list = field_list
+            self._field_list.append((ptype, field))
+
+        # Find out which auxiliaries we have and what is their format
+        tot_parts = np.sum(data_file.total_particles.values())
+        endian = data_file.ds.endian
+        self._aux_pdtypes = {}
+        self._aux_fields = [f.rsplit('.')[-1]
+                            for f in glob.glob(data_file.filename + '.*')]
+        for afield in self._aux_fields:
+            filename = data_file.filename + '.' + afield
+            # We need to do some fairly ugly detection to see what format the
+            # auxiliary files are in.  They can be either ascii or binary, and
+            # the binary files can be either floats, ints, or doubles.  We're
+            # going to use a try-catch cascade to determine the format.
+            filesize = os.stat(filename).st_size
+            if np.fromfile(filename, dtype=np.dtype(endian + 'i4'),
+                           count=1) != tot_parts:
+                with open(filename) as f:
+                    if int(f.readline()) != tot_parts:
+                        raise RuntimeError
+                self._aux_pdtypes[afield] = "ascii"
+            elif (filesize - 4) / 8 == tot_parts:
+                self._aux_pdtypes[afield] = np.dtype(
+                    [('l', endian + 'i4'), ('aux', endian + 'd', tot_parts)])
+            elif (filesize - 4) / 4 == tot_parts:
+                self._aux_pdtypes[afield] = np.dtype(
+                    [('l', endian + 'i4'), ('aux', endian + 'f', tot_parts)])
+            else:
+                raise RuntimeError
+
+        # Add the auxiliary fields to each ptype we have
+        for ptype in ["Gas", "DarkMatter", "Stars"]:
+            if any([ptype == field[0] for field in self._field_list]):
+                self._field_list += \
+                    [(ptype, afield) for afield in self._aux_fields]
         return self._field_list
 
     def _identify_fields(self, data_file):
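
With detection moved into _create_dtypes, the dtype (or the marker string "ascii") is computed once per aux file at load time, and the read path only branches on the cached value. A hedged sketch of that split, using a hypothetical module-level cache and made-up field names and particle count in place of the handler attribute:

import numpy as np

# Hypothetical cache built once at load time: field name -> structured dtype for
# binary files, or the marker string "ascii" for plain-text files.
aux_pdtypes = {
    'FeMassFrac': np.dtype([('l', '>i4'), ('aux', '>d', 1000)]),
    'HI': 'ascii',
}

def read_aux_field(filename, afield):
    """Read one aux file using the cached format decision."""
    if isinstance(aux_pdtypes[afield], np.dtype):
        return np.fromfile(filename, dtype=aux_pdtypes[afield])['aux'].flatten()
    return np.genfromtxt(filename, skip_header=1)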


https://bitbucket.org/yt_analysis/yt/commits/f5bb987a4398/
Changeset:   f5bb987a4398
Branch:      yt
User:        xarthisius
Date:        2015-09-17 18:33:17+00:00
Summary:     Fix chunking i/o for aux files
Affected #:  1 file

diff -r 4013dd8ac9e1fd5bb2a558cea1ec67deec45297e -r f5bb987a4398f9e2d088078616ff0b1c65f1b3d6 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -17,6 +17,7 @@
 
 import glob
 import numpy as np
+from numpy.lib.recfunctions import append_fields
 import os
 
 from yt.utilities.io_handler import \
@@ -32,7 +33,8 @@
     _dataset_type = "tipsy"
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
-    _pdtypes = None # dtypes, to be filled in later
+    _pdtypes = None  # dtypes, to be filled in later
+    _aux_pdtypes = None  # auxiliary files' dtypes
 
     _ptypes = ( "Gas",
                 "DarkMatter",
@@ -69,26 +71,6 @@
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
-    def _read_aux_fields(self, field, mask, data_file):
-        """
-        Read in auxiliary files from gasoline/pkdgrav.
-        This method will automatically detect the format of the file.
-        """
-        filename = data_file.filename + '.' + field
-        if isinstance(self._aux_pdtypes[field], np.dtype):
-            auxin = np.fromfile(filename, dtype=self._aux_pdtypes[field])
-            auxdata = auxin['aux'].flatten()
-        else:
-            auxdata = np.genfromtxt(filename, skip_header=1)
-
-        # Use the mask to slice out the appropriate particle type data
-        if mask.size == data_file.total_particles['Gas']:
-            return auxdata[:data_file.total_particles['Gas']]
-        elif mask.size == data_file.total_particles['DarkMatter']:
-            return auxdata[data_file.total_particles['Gas']:-data_file.total_particles['DarkMatter']]
-        else:
-            return auxdata[-data_file.total_particles['Stars']:]
-
     def _fill_fields(self, fields, vals, mask, data_file):
         if mask is None:
             size = 0
@@ -97,9 +79,7 @@
         rv = {}
         for field in fields:
             mylog.debug("Allocating %s values for %s", size, field)
-            if field in self._aux_fields: #Read each of the auxiliary fields
-                rv[field] = self._read_aux_fields(field, mask, data_file)
-            elif field in self._vector_fields:
+            if field in self._vector_fields:
                 rv[field] = np.empty((size, 3), dtype="float64")
                 if size == 0: continue
                 rv[field][:,0] = vals[field]['x'][mask]
@@ -138,6 +118,7 @@
                     del p
                     yield ptype, d
 
+
     def _read_particle_fields(self, chunks, ptf, selector):
         chunks = list(chunks)
         data_files = set([])
@@ -148,13 +129,43 @@
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
+
+            # TODO refactor this loop
+            # we need to open all aux files for chunking to work
+            aux_fh = {}
+            aux_fields_offsets = {}
+            for afield in self._aux_fields:
+                aux_fh[afield] = open(data_file.filename + '.' + afield, 'rb')
+                pos = 4     # TODO fixme
+                aux_fields_offsets[afield] = {}
+                for ptype in self._ptypes:
+                    aux_fields_offsets[afield][ptype] = pos
+                    if data_file.total_particles[ptype] == 0:
+                        continue
+                    size = np.dtype(self._aux_pdtypes[afield]).itemsize
+                    pos += data_file.total_particles[ptype] * size
+
             for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
                 f.seek(poff[ptype], os.SEEK_SET)
+                afields = list(set(field_list).intersection(self._aux_fields))
+                for afield in afields:
+                    aux_fh[afield].seek(aux_fields_offsets[afield][ptype], os.SEEK_SET)
+
                 total = 0
                 while total < tp[ptype]:
-                    p = np.fromfile(f, self._pdtypes[ptype],
-                        count=min(self._chunksize, tp[ptype] - total))
+                    count = min(self._chunksize, tp[ptype] - total)
+                    p = np.fromfile(f, self._pdtypes[ptype], count=count)
                     total += p.size
+                    auxdata = []
+                    for afield in afields:
+                        if isinstance(self._aux_pdtypes[afield], type):
+                            auxdata.append(
+                                np.fromfile(aux_fh[afield], self._aux_pdtypes[afield],
+                                            count=count)
+                            )
+                        else:
+                            auxdata.append(np.genfromtxt(fh, skip_header=1, count=count))
+                    p = append_fields(p, afields, auxdata)
                     mask = selector.select_points(
                         p["Coordinates"]['x'].astype("float64"),
                         p["Coordinates"]['y'].astype("float64"),
@@ -164,6 +175,8 @@
                     for field in field_list:
                         yield (ptype, field), tf.pop(field)
             f.close()
+            for fh in list(aux_fh.values()):
+                fh.close()
 
     def _update_domain(self, data_file):
         '''
@@ -298,18 +311,16 @@
             # the binary files can be either floats, ints, or doubles.  We're
             # going to use a try-catch cascade to determine the format.
             filesize = os.stat(filename).st_size
-            if np.fromfile(filename, dtype=np.dtype(endian + 'i4'),
+            if np.fromfile(filename, np.dtype(endian + 'i4'),
                            count=1) != tot_parts:
                 with open(filename) as f:
                     if int(f.readline()) != tot_parts:
                         raise RuntimeError
                 self._aux_pdtypes[afield] = "ascii"
             elif (filesize - 4) / 8 == tot_parts:
-                self._aux_pdtypes[afield] = np.dtype(
-                    [('l', endian + 'i4'), ('aux', endian + 'd', tot_parts)])
+                self._aux_pdtypes[afield] = np.float64
             elif (filesize - 4) / 4 == tot_parts:
-                self._aux_pdtypes[afield] = np.dtype(
-                    [('l', endian + 'i4'), ('aux', endian + 'f', tot_parts)])
+                self._aux_pdtypes[afield] = np.float32
             else:
                 raise RuntimeError
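
The chunked read path now keeps one open handle per aux file, seeks it to the offset of the particle type being read, and merges the aux columns into the structured particle chunk with numpy.lib.recfunctions.append_fields. A small self-contained illustration of that merge step, with fabricated arrays standing in for a particle chunk and two aux reads:

import numpy as np
from numpy.lib.recfunctions import append_fields

# Fake particle chunk with coordinates, plus two aux columns read separately.
p = np.zeros(4, dtype=[('x', 'f8'), ('y', 'f8'), ('z', 'f8')])
afields = ['HI', 'FeMassFrac']
auxdata = [np.arange(4, dtype='f8'), np.linspace(0.0, 1.0, 4)]

# append_fields bolts the aux columns onto the structured array, so downstream
# code can treat ("Gas", "HI") the same way as any on-disk field.
p = append_fields(p, afields, auxdata, usemask=False)
print(p.dtype.names)   # ('x', 'y', 'z', 'HI', 'FeMassFrac')
print(p['HI'])         # [0. 1. 2. 3.]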
 


https://bitbucket.org/yt_analysis/yt/commits/9671a748b13a/
Changeset:   9671a748b13a
Branch:      yt
User:        xarthisius
Date:        2015-09-17 21:09:15+00:00
Summary:     Refactor aux_fields' offset calculation
Affected #:  2 files

diff -r f5bb987a4398f9e2d088078616ff0b1c65f1b3d6 -r 9671a748b13acdb6179e9c40e11f50f560902bdd yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -71,7 +71,7 @@
                  bounding_box=None,
                  units_override=None):
         # Because Tipsy outputs don't have a fixed domain boundary, one can
-        # specify a bounding box which effectively gives a domain_left_edge 
+        # specify a bounding box which effectively gives a domain_left_edge
         # and domain_right_edge
         self.bounding_box = bounding_box
         self.filter_bbox = (bounding_box is not None)
@@ -179,7 +179,7 @@
             else:
                 self.domain_left_edge = None
                 self.domain_right_edge = None
-        else: 
+        else:
             bbox = np.array(self.bounding_box, dtype="float64")
             if bbox.shape == (2, 3):
                 bbox = bbox.transpose()
@@ -225,7 +225,7 @@
             self.mass_unit = self.quan(mu, 'Msun')
             density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
             # Gasoline's hubble constant, dHubble0, is stored units of proper code time.
-            self.hubble_constant *= np.sqrt(G.in_units('kpc**3*Msun**-1*s**-2')*density_unit).value/(3.2407793e-18)  
+            self.hubble_constant *= np.sqrt(G.in_units('kpc**3*Msun**-1*s**-2')*density_unit).value/(3.2407793e-18)
             cosmo = Cosmology(self.hubble_constant,
                               self.omega_matter, self.omega_lambda)
             self.current_time = cosmo.hubble_time(self.current_redshift)
@@ -242,7 +242,7 @@
         '''
         This method automatically detects whether the tipsy file is big/little endian
         and is not corrupt/invalid.  It returns a tuple of (Valid, endianswap) where
-        Valid is a boolean that is true if the file is a tipsy file, and endianswap is 
+        Valid is a boolean that is true if the file is a tipsy file, and endianswap is
         the endianness character '>' or '<'.
         '''
         try:

diff -r f5bb987a4398f9e2d088078616ff0b1c65f1b3d6 -r 9671a748b13acdb6179e9c40e11f50f560902bdd yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -127,23 +127,15 @@
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
             poff = data_file.field_offsets
+            aux_fields_offsets = \
+                self._calculate_particle_offsets_aux(data_file)
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
 
-            # TODO refactor this loop
             # we need to open all aux files for chunking to work
             aux_fh = {}
-            aux_fields_offsets = {}
             for afield in self._aux_fields:
                 aux_fh[afield] = open(data_file.filename + '.' + afield, 'rb')
-                pos = 4     # TODO fixme
-                aux_fields_offsets[afield] = {}
-                for ptype in self._ptypes:
-                    aux_fields_offsets[afield][ptype] = pos
-                    if data_file.total_particles[ptype] == 0:
-                        continue
-                    size = np.dtype(self._aux_pdtypes[afield]).itemsize
-                    pos += data_file.total_particles[ptype] * size
 
             for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
                 f.seek(poff[ptype], os.SEEK_SET)
@@ -164,7 +156,10 @@
                                             count=count)
                             )
                         else:
-                            auxdata.append(np.genfromtxt(fh, skip_header=1, count=count))
+                            auxdata.append(
+                                np.genfromtxt(aux_fh[afield], skip_header=1,
+                                              count=count)
+                            )
                     p = append_fields(p, afields, auxdata)
                     mask = selector.select_points(
                         p["Coordinates"]['x'].astype("float64"),
@@ -174,6 +169,8 @@
                     tf = self._fill_fields(field_list, p, mask, data_file)
                     for field in field_list:
                         yield (ptype, field), tf.pop(field)
+
+            # close all file handles
             f.close()
             for fh in list(aux_fh.values()):
                 fh.close()
@@ -343,3 +340,20 @@
             size = self._pdtypes[ptype].itemsize
             pos += data_file.total_particles[ptype] * size
         return field_offsets
+
+    def _calculate_particle_offsets_aux(self, data_file):
+        aux_fields_offsets = {}
+        for afield in self._aux_fields:
+            if isinstance(self._aux_pdtypes[afield], type):
+                pos = 4  # i4
+                aux_fields_offsets[afield] = {}
+                for ptype in self._ptypes:
+                    aux_fields_offsets[afield][ptype] = pos
+                    if data_file.total_particles[ptype] == 0:
+                        continue
+                    size = np.dtype(self._aux_pdtypes[afield]).itemsize
+                    pos += data_file.total_particles[ptype] * size
+            else:
+                # handle gentext case
+                raise RuntimeError
+        return aux_fields_offsets
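
The new helper walks the particle types in on-disk order and accumulates byte offsets starting after the 4-byte count header, one fixed-size record per particle. A hedged standalone sketch of that accumulation with made-up particle counts:

import numpy as np

def aux_offsets(total_particles, record_dtype, ptypes=("Gas", "DarkMatter", "Stars")):
    """Byte offset of each particle type inside one binary aux file."""
    offsets = {}
    pos = 4  # skip the leading 4-byte particle-count header
    for ptype in ptypes:
        offsets[ptype] = pos
        pos += total_particles.get(ptype, 0) * np.dtype(record_dtype).itemsize
    return offsets

print(aux_offsets({"Gas": 100, "DarkMatter": 50, "Stars": 10},
                  np.dtype([('aux', '>d')])))
# {'Gas': 4, 'DarkMatter': 804, 'Stars': 1204}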


https://bitbucket.org/yt_analysis/yt/commits/2596b7dbea36/
Changeset:   2596b7dbea36
Branch:      yt
User:        xarthisius
Date:        2015-09-17 21:12:26+00:00
Summary:     pep8
Affected #:  1 file

diff -r 9671a748b13acdb6179e9c40e11f50f560902bdd -r 2596b7dbea36853df6c9de4b2a54471eb26c0e3d yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -29,6 +29,7 @@
 
 CHUNKSIZE = 10000000
 
+
 class IOHandlerTipsyBinary(BaseIOHandler):
     _dataset_type = "tipsy"
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
@@ -36,33 +37,33 @@
     _pdtypes = None  # dtypes, to be filled in later
     _aux_pdtypes = None  # auxiliary files' dtypes
 
-    _ptypes = ( "Gas",
-                "DarkMatter",
-                "Stars" )
-    _chunksize = 64*64*64
+    _ptypes = ("Gas",
+               "DarkMatter",
+               "Stars")
+    _chunksize = 64 * 64 * 64
 
     _aux_fields = None
-    _fields = ( ("Gas", "Mass"),
-                ("Gas", "Coordinates"),
-                ("Gas", "Velocities"),
-                ("Gas", "Density"),
-                ("Gas", "Temperature"),
-                ("Gas", "Epsilon"),
-                ("Gas", "Metals"),
-                ("Gas", "Phi"),
-                ("DarkMatter", "Mass"),
-                ("DarkMatter", "Coordinates"),
-                ("DarkMatter", "Velocities"),
-                ("DarkMatter", "Epsilon"),
-                ("DarkMatter", "Phi"),
-                ("Stars", "Mass"),
-                ("Stars", "Coordinates"),
-                ("Stars", "Velocities"),
-                ("Stars", "Metals"),
-                ("Stars", "FormationTime"),
-                ("Stars", "Epsilon"),
-                ("Stars", "Phi")
-              )
+    _fields = (("Gas", "Mass"),
+               ("Gas", "Coordinates"),
+               ("Gas", "Velocities"),
+               ("Gas", "Density"),
+               ("Gas", "Temperature"),
+               ("Gas", "Epsilon"),
+               ("Gas", "Metals"),
+               ("Gas", "Phi"),
+               ("DarkMatter", "Mass"),
+               ("DarkMatter", "Coordinates"),
+               ("DarkMatter", "Velocities"),
+               ("DarkMatter", "Epsilon"),
+               ("DarkMatter", "Phi"),
+               ("Stars", "Mass"),
+               ("Stars", "Coordinates"),
+               ("Stars", "Velocities"),
+               ("Stars", "Metals"),
+               ("Stars", "FormationTime"),
+               ("Stars", "Epsilon"),
+               ("Stars", "Phi")
+               )
 
     def __init__(self, *args, **kwargs):
         self._aux_fields = []
@@ -81,23 +82,24 @@
             mylog.debug("Allocating %s values for %s", size, field)
             if field in self._vector_fields:
                 rv[field] = np.empty((size, 3), dtype="float64")
-                if size == 0: continue
-                rv[field][:,0] = vals[field]['x'][mask]
-                rv[field][:,1] = vals[field]['y'][mask]
-                rv[field][:,2] = vals[field]['z'][mask]
+                if size == 0:
+                    continue
+                rv[field][:, 0] = vals[field]['x'][mask]
+                rv[field][:, 1] = vals[field]['y'][mask]
+                rv[field][:, 2] = vals[field]['z'][mask]
             else:
                 rv[field] = np.empty(size, dtype="float64")
-                if size == 0: continue
+                if size == 0:
+                    continue
                 rv[field][:] = vals[field][mask]
             if field == "Coordinates":
                 eps = np.finfo(rv[field].dtype).eps
                 for i in range(3):
-                  rv[field][:,i] = np.clip(rv[field][:,i],
-                      self.domain_left_edge[i] + eps,
-                      self.domain_right_edge[i] - eps)
+                    rv[field][:, i] = np.clip(rv[field][:, i],
+                                              self.domain_left_edge[i] + eps,
+                                              self.domain_right_edge[i] - eps)
         return rv
 
-
     def _read_particle_coords(self, chunks, ptf):
         data_files = set([])
         for chunk in chunks:
@@ -107,18 +109,19 @@
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
-            for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
+            for ptype, field_list in sorted(ptf.items(),
+                                            key=lambda a: poff[a[0]]):
                 f.seek(poff[ptype], os.SEEK_SET)
                 total = 0
                 while total < tp[ptype]:
-                    p = np.fromfile(f, self._pdtypes[ptype],
-                            count=min(self._chunksize, tp[ptype] - total))
+                    count = min(self._chunksize, tp[ptype] - total)
+                    p = np.fromfile(f, self._pdtypes[ptype], count=count)
                     total += p.size
-                    d = [p["Coordinates"][ax].astype("float64") for ax in 'xyz']
+                    d = [p["Coordinates"][ax].astype("float64")
+                         for ax in 'xyz']
                     del p
                     yield ptype, d
 
-
     def _read_particle_fields(self, chunks, ptf, selector):
         chunks = list(chunks)
         data_files = set([])
@@ -137,11 +140,13 @@
             for afield in self._aux_fields:
                 aux_fh[afield] = open(data_file.filename + '.' + afield, 'rb')
 
-            for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
+            for ptype, field_list in sorted(ptf.items(),
+                                            key=lambda a: poff[a[0]]):
                 f.seek(poff[ptype], os.SEEK_SET)
                 afields = list(set(field_list).intersection(self._aux_fields))
                 for afield in afields:
-                    aux_fh[afield].seek(aux_fields_offsets[afield][ptype], os.SEEK_SET)
+                    aux_fh[afield].seek(
+                        aux_fields_offsets[afield][ptype], os.SEEK_SET)
 
                 total = 0
                 while total < tp[ptype]:
@@ -152,7 +157,8 @@
                     for afield in afields:
                         if isinstance(self._aux_pdtypes[afield], type):
                             auxdata.append(
-                                np.fromfile(aux_fh[afield], self._aux_pdtypes[afield],
+                                np.fromfile(aux_fh[afield],
+                                            self._aux_pdtypes[afield],
                                             count=count)
                             )
                         else:
@@ -165,7 +171,8 @@
                         p["Coordinates"]['x'].astype("float64"),
                         p["Coordinates"]['y'].astype("float64"),
                         p["Coordinates"]['z'].astype("float64"), 0.0)
-                    if mask is None: continue
+                    if mask is None:
+                        continue
                     tf = self._fill_fields(field_list, p, mask, data_file)
                     for field in field_list:
                         yield (ptype, field), tf.pop(field)
@@ -185,23 +192,25 @@
         ind = 0
         # Check to make sure that the domain hasn't already been set
         # by the parameter file
-        if np.all(np.isfinite(ds.domain_left_edge)) and np.all(np.isfinite(ds.domain_right_edge)):
+        if np.all(np.isfinite(ds.domain_left_edge)) and \
+                np.all(np.isfinite(ds.domain_right_edge)):
             return
         with open(data_file.filename, "rb") as f:
             ds.domain_left_edge = 0
             ds.domain_right_edge = 0
             f.seek(ds._header_offset)
-            mi =   np.array([1e30, 1e30, 1e30], dtype="float64")
-            ma =  -np.array([1e30, 1e30, 1e30], dtype="float64")
+            mi = np.array([1e30, 1e30, 1e30], dtype="float64")
+            ma = -np.array([1e30, 1e30, 1e30], dtype="float64")
             for iptype, ptype in enumerate(self._ptypes):
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
-                if count == 0: continue
+                if count == 0:
+                    continue
                 stop = ind + count
                 while ind < stop:
                     c = min(CHUNKSIZE, stop - ind)
-                    pp = np.fromfile(f, dtype = self._pdtypes[ptype],
-                                     count = c)
+                    pp = np.fromfile(f, dtype=self._pdtypes[ptype],
+                                     count=c)
                     np.minimum(mi, [pp["Coordinates"]["x"].min(),
                                     pp["Coordinates"]["y"].min(),
                                     pp["Coordinates"]["z"].min()], mi)
@@ -217,7 +226,7 @@
         ds.domain_right_edge = ds.arr(ma, 'code_length')
         ds.domain_width = DW = ds.domain_right_edge - ds.domain_left_edge
         ds.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
-                                 DW.units.dimensions)
+                             DW.units.dimensions)
 
     def _initialize_index(self, data_file, regions):
         ds = data_file.ds
@@ -232,27 +241,29 @@
             for iptype, ptype in enumerate(self._ptypes):
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
-                if count == 0: continue
+                if count == 0:
+                    continue
                 stop = ind + count
                 while ind < stop:
                     c = min(CHUNKSIZE, stop - ind)
-                    pp = np.fromfile(f, dtype = self._pdtypes[ptype],
-                                     count = c)
+                    pp = np.fromfile(f, dtype=self._pdtypes[ptype],
+                                     count=c)
                     mis = np.empty(3, dtype="float64")
                     mas = np.empty(3, dtype="float64")
                     for axi, ax in enumerate('xyz'):
                         mi = pp["Coordinates"][ax].min()
                         ma = pp["Coordinates"][ax].max()
-                        mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
+                        mylog.debug(
+                            "Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
                         mis[axi] = mi
                         mas[axi] = ma
                     pos = np.empty((pp.size, 3), dtype="float64")
                     for i, ax in enumerate("xyz"):
-                        pos[:,i] = pp["Coordinates"][ax]
+                        pos[:, i] = pp["Coordinates"][ax]
                     regions.add_data_file(pos, data_file.file_id,
                                           data_file.ds.filter_bbox)
-                    morton[ind:ind+c] = compute_morton(
-                        pos[:,0], pos[:,1], pos[:,2],
+                    morton[ind:ind + c] = compute_morton(
+                        pos[:, 0], pos[:, 1], pos[:, 2],
                         DLE, DRE, data_file.ds.filter_bbox)
                     ind += c
         mylog.info("Adding %0.3e particles", morton.size)
@@ -267,7 +278,7 @@
         return npart
 
     @classmethod
-    def _compute_dtypes(cls, field_dtypes, endian = "<"):
+    def _compute_dtypes(cls, field_dtypes, endian="<"):
         pds = {}
         for ptype, field in cls._fields:
             dtbase = field_dtypes.get(field, 'f')
@@ -336,7 +347,8 @@
         pos = data_file.ds._header_offset
         for ptype in self._ptypes:
             field_offsets[ptype] = pos
-            if data_file.total_particles[ptype] == 0: continue
+            if data_file.total_particles[ptype] == 0:
+                continue
             size = self._pdtypes[ptype].itemsize
             pos += data_file.total_particles[ptype] * size
         return field_offsets


https://bitbucket.org/yt_analysis/yt/commits/a8672f15e605/
Changeset:   a8672f15e605
Branch:      yt
User:        xarthisius
Date:        2015-09-21 14:12:03+00:00
Summary:     Only try to append to 'p' if 'afields' is not empty
Affected #:  1 file

diff -r 2596b7dbea36853df6c9de4b2a54471eb26c0e3d -r a8672f15e605afe667e8852953f00bc3b093e8a5 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -166,7 +166,8 @@
                                 np.genfromtxt(aux_fh[afield], skip_header=1,
                                               count=count)
                             )
-                    p = append_fields(p, afields, auxdata)
+                    if afields:
+                        p = append_fields(p, afields, auxdata)
                     mask = selector.select_points(
                         p["Coordinates"]['x'].astype("float64"),
                         p["Coordinates"]['y'].astype("float64"),


https://bitbucket.org/yt_analysis/yt/commits/7f8c17b6c5ad/
Changeset:   7f8c17b6c5ad
Branch:      yt
User:        xarthisius
Date:        2015-09-21 14:31:15+00:00
Summary:     Use proper types including endianness instead of simple numpy types
Affected #:  1 file

diff -r a8672f15e605afe667e8852953f00bc3b093e8a5 -r 7f8c17b6c5adf7b8728c490b2086db8ab036ee5c yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -155,7 +155,7 @@
                     total += p.size
                     auxdata = []
                     for afield in afields:
-                        if isinstance(self._aux_pdtypes[afield], type):
+                        if isinstance(self._aux_pdtypes[afield], np.dtype):
                             auxdata.append(
                                 np.fromfile(aux_fh[afield],
                                             self._aux_pdtypes[afield],
@@ -327,9 +327,9 @@
                         raise RuntimeError
                 self._aux_pdtypes[afield] = "ascii"
             elif (filesize - 4) / 8 == tot_parts:
-                self._aux_pdtypes[afield] = np.float64
+                self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'd')])
             elif (filesize - 4) / 4 == tot_parts:
-                self._aux_pdtypes[afield] = np.float32
+                self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'f')])
             else:
                 raise RuntimeError
 
@@ -357,7 +357,7 @@
     def _calculate_particle_offsets_aux(self, data_file):
         aux_fields_offsets = {}
         for afield in self._aux_fields:
-            if isinstance(self._aux_pdtypes[afield], type):
+            if isinstance(self._aux_pdtypes[afield], np.dtype):
                 pos = 4  # i4
                 aux_fields_offsets[afield] = {}
                 for ptype in self._ptypes:
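
Replacing np.float64/np.float32 with byte-order-qualified dtypes matters because the aux files may be written on a machine whose endianness differs from the reader's; a bare numpy scalar type always means native order. A brief hedged illustration of the difference, with made-up values:

import numpy as np

native = np.dtype(np.float64)                 # native byte order ('=')
pinned = np.dtype([('aux', '>d')])            # big-endian double, as in the diff above
print(native.byteorder, pinned['aux'].byteorder)   # = >

x = np.array([1.5], dtype='>d')
print(x.tobytes() == x.astype('<d').tobytes())     # False: the byte layouts differ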


https://bitbucket.org/yt_analysis/yt/commits/377329e2bc14/
Changeset:   377329e2bc14
Branch:      yt
User:        xarthisius
Date:        2015-09-21 15:13:38+00:00
Summary:     Handle ascii auxiliary files
Affected #:  1 file

diff -r 7f8c17b6c5adf7b8728c490b2086db8ab036ee5c -r 377329e2bc146a23a95e8a96dbe5035418dcf958 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -146,13 +146,13 @@
                 afields = list(set(field_list).intersection(self._aux_fields))
                 for afield in afields:
                     aux_fh[afield].seek(
-                        aux_fields_offsets[afield][ptype], os.SEEK_SET)
+                        aux_fields_offsets[afield][ptype][0], os.SEEK_SET)
 
                 total = 0
                 while total < tp[ptype]:
                     count = min(self._chunksize, tp[ptype] - total)
                     p = np.fromfile(f, self._pdtypes[ptype], count=count)
-                    total += p.size
+
                     auxdata = []
                     for afield in afields:
                         if isinstance(self._aux_pdtypes[afield], np.dtype):
@@ -162,10 +162,20 @@
                                             count=count)
                             )
                         else:
-                            auxdata.append(
-                                np.genfromtxt(aux_fh[afield], skip_header=1,
-                                              count=count)
-                            )
+                            aux_fh[afield].seek(0, os.SEEK_SET)
+                            sh = aux_fields_offsets[afield][ptype][0]
+                            sf = aux_fields_offsets[afield][ptype][1]
+                            if tp[ptype] > 0:
+                                aux = np.genfromtxt(
+                                    aux_fh[afield], skip_header=sh,
+                                    skip_footer=sf
+                                )
+                                if aux.ndim < 1:
+                                    aux = np.array([aux])
+                                auxdata.append(aux[total:total + count])
+                                del aux
+
+                    total += p.size
                     if afields:
                         p = append_fields(p, afields, auxdata)
                     mask = selector.select_points(
@@ -356,17 +366,21 @@
 
     def _calculate_particle_offsets_aux(self, data_file):
         aux_fields_offsets = {}
+        tp = data_file.total_particles
         for afield in self._aux_fields:
+            aux_fields_offsets[afield] = {}
             if isinstance(self._aux_pdtypes[afield], np.dtype):
                 pos = 4  # i4
-                aux_fields_offsets[afield] = {}
                 for ptype in self._ptypes:
-                    aux_fields_offsets[afield][ptype] = pos
+                    aux_fields_offsets[afield][ptype] = (pos, 0)
                     if data_file.total_particles[ptype] == 0:
                         continue
                     size = np.dtype(self._aux_pdtypes[afield]).itemsize
                     pos += data_file.total_particles[ptype] * size
             else:
-                # handle gentext case
-                raise RuntimeError
+                aux_fields_offsets[afield].update(
+                    {'DarkMatter': (1, tp["Gas"] + tp["Stars"]),
+                     'Gas': (1 + tp["DarkMatter"], tp["Stars"]),
+                     'Stars': (1 + tp["DarkMatter"] + tp["Gas"], 0)}
+                )
         return aux_fields_offsets
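
For ASCII aux files the offsets become line counts rather than byte positions: each particle type's block is described by how many lines to skip at the top (skip_header) and at the bottom (skip_footer) of the file. A hedged toy example of that slicing with np.genfromtxt on an in-memory file:

import io
import numpy as np

# Toy ascii aux file: a count line followed by six per-particle values.
text = "6\n0.1\n0.2\n0.3\n1.1\n1.2\n2.5\n"

# Skip the count line plus the first three particles, and drop the last one,
# which leaves the two values in the middle of the file.
middle = np.genfromtxt(io.StringIO(text), skip_header=4, skip_footer=1)
print(middle)   # [1.1 1.2]

The follow-up changeset below folds the chunk position into those two counts, so each chunk is read directly instead of loading the whole block and slicing it afterwards.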


https://bitbucket.org/yt_analysis/yt/commits/c06ac3f049d0/
Changeset:   c06ac3f049d0
Branch:      yt
User:        xarthisius
Date:        2015-09-21 15:18:49+00:00
Summary:     Calculate accurate skip_header and skip_footer to avoid slicing a big array
Affected #:  1 file

diff -r 377329e2bc146a23a95e8a96dbe5035418dcf958 -r c06ac3f049d0c8fd90c3edb1c624fb0ee644c0fe yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -163,8 +163,9 @@
                             )
                         else:
                             aux_fh[afield].seek(0, os.SEEK_SET)
-                            sh = aux_fields_offsets[afield][ptype][0]
-                            sf = aux_fields_offsets[afield][ptype][1]
+                            sh = aux_fields_offsets[afield][ptype][0] + total
+                            sf = aux_fields_offsets[afield][ptype][1] + \
+                                tp[ptype] - count
                             if tp[ptype] > 0:
                                 aux = np.genfromtxt(
                                     aux_fh[afield], skip_header=sh,
@@ -172,8 +173,7 @@
                                 )
                                 if aux.ndim < 1:
                                     aux = np.array([aux])
-                                auxdata.append(aux[total:total + count])
-                                del aux
+                                auxdata.append(aux)
 
                     total += p.size
                     if afields:


https://bitbucket.org/yt_analysis/yt/commits/973d8198545e/
Changeset:   973d8198545e
Branch:      yt
User:        xarthisius
Date:        2015-09-21 16:21:11+00:00
Summary:     Handle integer auxiliary fields
Affected #:  1 file

diff -r c06ac3f049d0c8fd90c3edb1c624fb0ee644c0fe -r 973d8198545e2eb85fa48d6e6a5e64c7b88d2a39 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -339,7 +339,10 @@
             elif (filesize - 4) / 8 == tot_parts:
                 self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'd')])
             elif (filesize - 4) / 4 == tot_parts:
-                self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'f')])
+                if afield.startswith("i"):
+                    self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'i')])
+                else:
+                    self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'f')])
             else:
                 raise RuntimeError
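
A 4-byte integer file is exactly the same size as a 4-byte float file, so the size check alone cannot tell them apart and the field name has to decide; aux fields such as iord and igasorder are integer-valued. A small hedged illustration of why misreading the bytes matters, with made-up data:

import numpy as np

ints   = np.array([1, 2, 3, 4], dtype='>i4')
floats = np.array([1.0, 2.0, 3.0, 4.0], dtype='>f4')
print(ints.nbytes == floats.nbytes)                # True: identical on-disk size
print(np.frombuffer(ints.tobytes(), dtype='>f4'))  # denormal garbage if misread as floats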
 


https://bitbucket.org/yt_analysis/yt/commits/e5b0df989a83/
Changeset:   e5b0df989a83
Branch:      yt
User:        xarthisius
Date:        2015-09-21 16:26:44+00:00
Summary:     Use 'self._ptypes' instead of hardcoding ptypes
Affected #:  1 file

diff -r 973d8198545e2eb85fa48d6e6a5e64c7b88d2a39 -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -347,7 +347,7 @@
                 raise RuntimeError
 
         # Add the auxiliary fields to each ptype we have
-        for ptype in ["Gas", "DarkMatter", "Stars"]:
+        for ptype in self._ptypes:
             if any([ptype == field[0] for field in self._field_list]):
                 self._field_list += \
                     [(ptype, afield) for afield in self._aux_fields]


https://bitbucket.org/yt_analysis/yt/commits/6a44fa11e59f/
Changeset:   6a44fa11e59f
Branch:      yt
User:        BW Keller
Date:        2015-09-21 19:07:07+00:00
Summary:     Manual merge.  Good to go with PR 1735.
Affected #:  58 files

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,10 +1,10 @@
-include distribute_setup.py README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
-recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
+recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
 include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
 include doc/extensions/README doc/Makefile
@@ -12,5 +12,3 @@
 prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
 prune yt/frontends/_skeleton
-prune tests
-exclude clean.sh .hgchurn

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -111,7 +111,7 @@
 
 .. code-block:: python
 
-    @yt.particle_filter(requires=["particle_type], filtered_type='all')
+    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
     def stars(pfilter, data):
         filter = data[(pfilter.filtered_type, "particle_type")] == 2
         return filter

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d19ee42177c60fb4b39550b5acd7a0f7e97f59f5c2da3565ff42cdd580454b0"
+  "signature": "sha256:6a06d5720eb6316ac0d322ef0898ec20f33d65ea3eeeacef35ae1d869af12607"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -352,7 +352,7 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "Round-Trip Conversions to and from AstroPy's Units System"
+      "Round-Trip Conversions to and from Other Unit Systems"
      ]
     },
     {
@@ -503,6 +503,58 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also do the same thing with unitful quantities from the [Pint package](http://pint.readthedocs.org), using essentially the same procedure:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from pint import UnitRegistry\n",
+      "ureg = UnitRegistry()\n",
+      "v = 1000.*ureg.km/ureg.s\n",
+      "w = yt.YTQuantity.from_pint(v)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print v, type(v)\n",
+      "print w, type(w)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ptemp = temp.to_pint()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print temp, type(temp)\n",
+      "print ptemp, type(ptemp)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

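Pulling the new notebook cells together, a minimal sketch of the Pint round
trip described above (``temp`` here is an assumed ``YTQuantity``, not a value
defined by the notebook):

    import yt
    from pint import UnitRegistry

    ureg = UnitRegistry()
    v = 1000. * ureg.km / ureg.s
    w = yt.YTQuantity.from_pint(v)      # Pint -> yt

    temp = yt.YTQuantity(1.0e7, 'K')    # assumed quantity for the reverse trip
    ptemp = temp.to_pint()              # yt -> Pint
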
diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -84,6 +84,9 @@
 * :func:`~yt.testing.assert_equal` can operate on arrays.
 * :func:`~yt.testing.assert_almost_equal` can operate on arrays and accepts a
   relative allowable difference.
+* :func:`~yt.testing.assert_allclose_units` raises an error if two arrays are
+  not equal up to a desired absolute or relative tolerance. This wraps numpy's
+  assert_allclose to correctly verify unit consistency as well.
 * :func:`~yt.testing.amrspace` provides the ability to create AMR grid
   structures.
 * :func:`~yt.testing.expand_keywords` provides the ability to iterate over
@@ -99,9 +102,10 @@
 #. Inside that directory, create a new python file prefixed with ``test_`` and
    including the name of the functionality.
 #. Inside that file, create one or more routines prefixed with ``test_`` that
-   accept no arguments.  These should ``yield`` a set of values of the form
-   ``function``, ``arguments``.  For example ``yield assert_equal, 1.0, 1.0``
-   would evaluate that 1.0 equaled 1.0.
+   accept no arguments.  These should ``yield`` a tuple of the form
+   ``function``, ``argument_one``, ``argument_two``, etc.  For example
+   ``yield assert_equal, 1.0, 1.0`` would be captured by nose as a test that
+   asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
    several combinations of ``nproc``, so that domain decomposition can be
    tested as well.
@@ -113,6 +117,53 @@
 ``yt/data_objects/tests/test_covering_grid.py``, which covers a great deal of
 functionality.
 
+Debugging failing tests
+^^^^^^^^^^^^^^^^^^^^^^^
+
+When writing new tests, often one exposes bugs or writes a test incorrectly,
+causing an exception to be raised or a failed test. To help debug issues like
+this, ``nose`` can drop into a debugger whenever a test fails or raises an
+exception. This can be accomplished by passing ``--pdb`` and ``--pdb-failures``
+to the ``nosetests`` executable. These options will drop into the pdb debugger
+whenever an error is raised or a failure happens, respectively. Inside the
+debugger you can interactively print out variables and go up and down the call
+stack to determine the context for your failure or error.
+
+.. code-block:: bash
+
+    nosetests --pdb --pdb-failures
+
+In addition, one can debug more crudely using print statements. To do this,
+you can add print statements to the code as normal. However, the test runner
+will capture all print output by default. To ensure that output gets printed
+to your terminal while the tests are running, pass ``-s`` to the ``nosetests``
+executable.
+
+Also, to quickly debug a specific failing test, it is best to only run that
+one test during your testing session. This can be accomplished by explicitly
+passing the name of the test function or class to ``nosetests``, as in the
+following example:
+
+.. code-block:: bash
+
+    $ nosetests yt.visualization.tests.test_plotwindow:TestSetWidth
+
+This nosetests invocation will only run the tests defined by the
+``TestSetWidth`` class.
+
+Finally, to determine which test is failing while the tests are running, it helps
+to run the tests in "verbose" mode. This can be done by passing the ``-v`` option
+to the ``nosetests`` executable.
+
+All of the above ``nosetests`` options can be combined. So, for example, to run
+the ``TestSetWidth`` tests with verbose output, letting print statement output
+appear on the terminal, and enabling pdb debugging on errors or test failures,
+one would do:
+
+.. code-block:: bash
+
+    $ nosetests --pdb --pdb-failures -v -s yt.visualization.tests.test_plotwindow:TestSetWidth
+
 .. _answer_testing:
 
 Answer Testing
@@ -122,8 +173,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^
 
 Answer tests test **actual data**, and many operations on that data, to make
-sure that answers don't drift over time.  This is how we will be testing
-frontends, as opposed to operations, in yt.
+sure that answers don't drift over time.  This is how we test frontends, as
+opposed to operations, in yt.
 
 .. _run_answer_testing:
 
@@ -133,20 +184,104 @@
 The very first step is to make a directory and copy over the data against which
 you want to test.  Currently, we test:
 
+NMSU ART
+~~~~~~~~
+
+* ``D9p_500/10MpcBox_HartGal_csf_a0.500.d``
+
+ARTIO
+~~~~~
+
+* ``sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art``
+
+Athena
+~~~~~~
+
+* ``ShockCloud/id0/Cloud.0050.vtk``
+* ``MHDBlast/id0/Blast.0100.vtk``
+* ``RamPressureStripping/id0/rps.0062.vtk``
+* ``MHDSloshing/virgo_low_res.0054.vtk``
+
+Boxlib
+~~~~~~
+
+* ``RadAdvect/plt00000``
+* ``RadTube/plt00500``
+* ``StarParticles/plrd01000``
+
+Chombo
+~~~~~~
+
+* ``TurbBoxLowRes/data.0005.3d.hdf5``
+* ``GaussianCloud/data.0077.3d.hdf5``
+* ``IsothermalSphere/data.0000.3d.hdf5``
+* ``ZeldovichPancake/plt32.2d.hdf5``
+* ``KelvinHelmholtz/data.0004.hdf5``
+
+Enzo
+~~~~
+
 * ``DD0010/moving7_0010`` (available in ``tests/`` in the yt distribution)
 * ``IsolatedGalaxy/galaxy0030/galaxy0030``
+* ``enzo_tiny_cosmology/DD0046/DD0046``
+* ``enzo_cosmology_plus/DD0046/DD0046``
+
+FITS
+~~~~
+
+* ``radio_fits/grs-50-cube.fits``
+* ``UnigridData/velocity_field_20.fits``
+
+FLASH
+~~~~~
+
 * ``WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030``
 * ``GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300``
-* ``TurbBoxLowRes/data.0005.3d.hdf5``
-* ``GaussianCloud/data.0077.3d.hdf5``
-* ``RadAdvect/plt00000``
-* ``RadTube/plt00500``
+
+Gadget
+~~~~~~
+
+* ``IsothermalCollapse/snap_505``
+* ``IsothermalCollapse/snap_505.hdf5``
+* ``GadgetDiskGalaxy/snapshot_200.hdf5``
+
+Halo Catalog
+~~~~~~~~~~~~
+
+* ``owls_fof_halos/groups_001/group_001.0.hdf5``
+* ``owls_fof_halos/groups_008/group_008.0.hdf5``
+* ``gadget_fof_halos/groups_005/fof_subhalo_tab_005.0.hdf5``
+* ``gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5``
+* ``rockstar_halos/halos_0.0.bin``
+
+MOAB
+~~~~
+
+* ``c5/c5.h5m``
+
+
+RAMSES
+~~~~~~
+
+* ``output_00080/info_00080.txt``
+
+Tipsy
+~~~~~
+
+* ``halo1e11_run1.00400/halo1e11_run1.00400``
+* ``agora_1e11.00400/agora_1e11.00400``
+* ``TipsyGalaxy/galaxy.00300``
+
+OWLS
+~~~~
+
+* ``snapshot_033/snap_033.0.hdf5``
 
 These datasets are available at http://yt-project.org/data/.
 
 Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
 with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to compare.  Here is an example
+directory containing the test data you want to test against.  Here is an example
 config file:
 
 .. code-block:: none
@@ -154,47 +289,45 @@
    [yt]
    test_data_dir = /Users/tomservo/src/yt-data
 
-More data will be added over time.  To run the tests, you can import the yt
-module and invoke ``yt.run_nose()`` with a new keyword argument:
+More data will be added over time.  To run the answer tests, you must first
+generate a set of test answers locally on a "known good" revision, then update
+to the revision you want to test, and run the tests again using the locally
+stored answers.
 
-.. code-block:: python
-
-   import yt
-   yt.run_nose(run_answer_tests=True)
-
-If you have installed yt using ``python setup.py develop`` you can also
-optionally invoke nose using the ``nosetests`` command line interface:
+Let's focus on running the answer tests for a single frontend. It's possible to
+run the answer tests for **all** the frontends, but due to the large number of
+test datasets we currently use, this is not normally done except on the yt
+project's continuous integration server.
 
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store frontends.tipsy
 
-In either case, the current gold standard results will be downloaded from the
-rackspace cloud and compared to what is generated locally.  The results from a
-nose testing session are pretty straightforward to understand, the results for
-each test are printed directly to STDOUT. If a test passes, nose prints a
-period, F if a test fails, and E if the test encounters an exception or errors
-out for some reason.  If you want to also run tests for the 'big' datasets,
-then you can use the ``answer_big_data`` keyword argument:
-
-.. code-block:: python
-
-   import yt
-   yt.run_nose(run_answer_tests=True, answer_big_data=True)
-
-or, in the base directory of the yt mercurial repository:
+This command will create a set of local answers from the tipsy frontend tests
+and store them in ``$HOME/Documents/test`` (this can but does not have to be the
+same directory as the ``test_data_dir`` configuration variable defined in your
+``.yt/config`` file). To run the tipsy frontend's answer tests using a different
+yt changeset, update to that changeset, recompile if necessary, and run the
+tests using the following command:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --answer-big-data
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test frontends.tipsy
 
-It's also possible to only run the answer tests for one frontend.  For example,
-to run only the enzo answers tests, one can do,
+The results from a nose testing session are pretty straightforward to
+understand: the results for each test are printed directly to STDOUT.  If a test
+passes, nose prints a period, F if a test fails, and E if the test encounters an
+exception or errors out for some reason.  Explicit descriptions for each test
+are also printed if you pass ``-v`` to the ``nosetests`` executable.  If you
+want to also run tests for the 'big' datasets, then you will need to pass
+``--answer-big-data`` to ``nosetests``.  For example, to run the tests for the
+OWLS frontend, do the following:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing yt.frontends.enzo
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-big-data frontends.owls
+
 
 How to Write Answer Tests
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -260,38 +393,21 @@
   directory.
 
 * Create a new routine that operates similarly to the routines you can see
-  in Enzo's outputs.
+  in Enzo's output tests.
 
   * This routine should test a number of different fields and data objects.
 
   * The test routine itself should be decorated with
-    ``@requires_ds(file_name)``  This decorate can accept the argument
-    ``big_data`` for if this data is too big to run all the time.
+    ``@requires_ds(path_to_test_dataset)``. This decorator can accept the
+    argument ``big_data=True`` if the test is expensive.
 
-  * There are ``small_patch_amr`` and ``big_patch_amr`` routines that
-    you can yield from to execute a bunch of standard tests.  This is where
-    you should start, and then yield additional tests that stress the
-    outputs in whatever ways are necessary to ensure functionality.
+  * There are ``small_patch_amr`` and ``big_patch_amr`` routines that you can
+    yield from to execute a bunch of standard tests. In addition we have created
+    ``sph_answer`` which is more suited for particle SPH datasets. This is where
+    you should start, and then yield additional tests that stress the outputs in
+    whatever ways are necessary to ensure functionality.
 
   * **All tests should be yielded!**
 
 If you are adding to a frontend that has a few tests already, skip the first
 two steps.
-
-How to Upload Answers
-^^^^^^^^^^^^^^^^^^^^^
-
-To upload answers you can execute this command:
-
-.. code-block:: bash
-
-   $ nosetests --with-answer-testing frontends/enzo/ --answer-store --answer-name=whatever
-
-The current version of the gold standard can be found in the variable
-``_latest`` inside ``yt/utilities/answer_testing/framework.py``  As of
-the time of this writing, it is ``gold007``  Note that the name of the
-suite of results is now disconnected from the dataset's name, so you
-can upload multiple outputs with the same name and not collide.
-
-To upload answers, you **must** have the package boto installed, and you
-**must** have an Amazon key provided by Matt.  Contact Matt for these keys.

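As a concrete illustration of the yield-style unit tests described above, a
minimal sketch (the test name is made up) might look like:

    from yt.testing import fake_random_ds, assert_equal

    def test_cell_count():
        # run on a fake dataset for several domain decompositions
        for nprocs in [1, 2, 4]:
            ds = fake_random_ds(16, nprocs=nprocs)
            dd = ds.all_data()
            # each test is yielded as (function, argument_one, argument_two, ...)
            yield assert_equal, dd["ones"].size, 16**3
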
diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1257,8 +1257,8 @@
 
 .. _specifying-cosmology-tipsy:
 
-Specifying Tipsy Cosmological Parameters
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Specifying Tipsy Cosmological Parameters and Setting Default Units
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Cosmological parameters can be specified to Tipsy to enable computation of
 default units.  The parameters recognized are of this form:
@@ -1270,5 +1270,27 @@
                            'omega_matter': 0.272,
                            'hubble_constant': 0.702}
 
-These will be used set the units, if they are specified.
+If you wish to set the default units directly, you can do so by using the
+``unit_base`` keyword in the load statement.
 
+ .. code-block:: python
+
+    import yt
+    ds = yt.load(filename, unit_base={'length': (1.0, 'Mpc')})
+
+
+Loading Cosmological Simulations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you are not using a parameter file (i.e. non-Gasoline users), then you must
+use the keyword ``cosmology_parameters`` when loading your data set to indicate to
+yt that it is a cosmological data set. If you do not wish to set any
+non-default cosmological parameters, you may pass an empty dictionary.
+
+ .. code-block:: python
+
+    import yt
+    ds = yt.load(filename, cosmology_parameters={})
+
+
+

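Putting the two snippets above together, a hedged sketch of a load call that
sets both the default units and the cosmological parameters (``filename`` is a
placeholder for a Tipsy snapshot, and the parameter values are just the ones
quoted earlier in the documentation):

    import yt

    ds = yt.load(filename,
                 unit_base={'length': (1.0, 'Mpc')},
                 cosmology_parameters={'current_redshift': 0.0,
                                       'omega_lambda': 0.728,
                                       'omega_matter': 0.272,
                                       'hubble_constant': 0.702})
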
diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -32,18 +32,6 @@
     match, at which point it returns an instance of the appropriate
     :class:`yt.data_objects.api.Dataset` subclass.
     """
-    if len(args) == 0:
-        try:
-            from yt.extern.six.moves import tkinter
-            import tkinter, tkFileDialog
-        except ImportError:
-            raise YTOutputNotIdentified(args, kwargs)
-        root = tkinter.Tk()
-        filename = tkFileDialog.askopenfilename(parent=root,title='Choose a file')
-        if filename != None:
-            return load(filename)
-        else:
-            raise YTOutputNotIdentified(args, kwargs)
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, str)
             else arg for arg in args]
@@ -100,32 +88,6 @@
         mylog.error("    Possible: %s", c)
     raise YTOutputNotIdentified(args, kwargs)
 
-def projload(ds, axis, weight_field = None):
-    # This is something of a hack, so that we can just get back a projection
-    # and not utilize any of the intermediate index objects.
-    class ProjMock(dict):
-        pass
-    import h5py
-    f = h5py.File(os.path.join(ds.fullpath, ds.parameter_filename + ".yt"))
-    b = f["/Projections/%s/" % (axis)]
-    wf = "weight_field_%s" % weight_field
-    if wf not in b: raise KeyError(wf)
-    fields = []
-    for k in b:
-        if k.startswith("weight_field"): continue
-        if k.endswith("_%s" % weight_field):
-            fields.append(k)
-    proj = ProjMock()
-    for f in ["px","py","pdx","pdy"]:
-        proj[f] = b[f][:]
-    for f in fields:
-        new_name = f[:-(len(weight_field) + 1)]
-        proj[new_name] = b[f][:]
-    proj.axis = axis
-    proj.ds = ds
-    f.close()
-    return proj
-
 def simulation(parameter_filename, simulation_type, find_outputs=False):
     """
     Loads a simulation time series object of the specified

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -136,6 +136,8 @@
         # that dx=dy=dz, at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if self.Parent is not None:
+            if not hasattr(self.Parent, 'dds'):
+                self.Parent._setup_dx()
             self.dds = self.Parent.dds.ndarray_view() / self.ds.refine_by
         else:
             LE, RE = self.index.grid_left_edge[id,:], \

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -159,7 +159,7 @@
             indexed octree will be constructed on these particles.
         fields : list of arrays
             All the necessary fields for computing the particle operation.  For
-            instance, this might include mass, velocity, etc.  
+            instance, this might include mass, velocity, etc.
         method : string
             This is the "method name" which will be looked up in the
             `particle_deposit` namespace as `methodname_deposit`.  Current
@@ -212,7 +212,7 @@
             indexed octree will be constructed on these particles.
         fields : list of arrays
             All the necessary fields for computing the particle operation.  For
-            instance, this might include mass, velocity, etc.  
+            instance, this might include mass, velocity, etc.
         index_fields : list of arrays
             All of the fields defined on the mesh that may be used as input to
             the operation.
@@ -265,11 +265,14 @@
         op.initialize()
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
-        op.process_octree(self.oct_handler, mdom_ind, positions, 
+        # Pointer operations within 'process_octree' require arrays to be
+        # contiguous cf. https://bitbucket.org/yt_analysis/yt/issues/1079
+        fields = [np.ascontiguousarray(f, dtype="float64") for f in fields]
+        op.process_octree(self.oct_handler, mdom_ind, positions,
             self.fcoords, fields,
             self.domain_id, self._domain_offset, self.ds.periodicity,
             index_fields, particle_octree, pdom_ind, self.ds.geometry)
-        # If there are 0s in the smoothing field this will not throw an error, 
+        # If there are 0s in the smoothing field this will not throw an error,
         # but silently return nans for vals where dividing by 0
         # Same as what is currently occurring, but suppressing the div by zero
         # error.
@@ -342,7 +345,7 @@
         op.initialize()
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
-        op.process_particles(particle_octree, pdom_ind, positions, 
+        op.process_particles(particle_octree, pdom_ind, positions,
             fields, self.domain_id, self._domain_offset, self.ds.periodicity,
             self.ds.geometry)
         vals = op.finalize()
@@ -494,7 +497,7 @@
         LE -= np.abs(LE) * eps
         RE = self.max(axis=0)
         RE += np.abs(RE) * eps
-        octree = ParticleOctreeContainer(dims, LE, RE, 
+        octree = ParticleOctreeContainer(dims, LE, RE,
             over_refine = over_refine_factor)
         octree.n_ref = n_ref
         octree.add(mi)

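A small stand-alone illustration of why the ``np.ascontiguousarray`` call added
above is needed (this snippet is not part of the changeset): sliced NumPy
arrays are often non-contiguous views, which breaks code that operates on raw
pointers.

    import numpy as np

    field = np.arange(12.0).reshape(3, 4)[:, ::2]     # a strided view
    print(field.flags['C_CONTIGUOUS'])                # False
    field = np.ascontiguousarray(field, dtype="float64")
    print(field.flags['C_CONTIGUOUS'])                # True
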
diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -691,8 +691,7 @@
 
         """
         from yt.units.dimensions import length
-        if hasattr(self, "cosmological_simulation") \
-           and getattr(self, "cosmological_simulation"):
+        if getattr(self, "cosmological_simulation", False):
             # this dataset is cosmological, so add cosmological units.
             self.unit_registry.modify("h", self.hubble_constant)
             # Comoving lengths
@@ -705,16 +704,15 @@
 
         self.set_code_units()
 
-        if hasattr(self, "cosmological_simulation") \
-           and getattr(self, "cosmological_simulation"):
+        if getattr(self, "cosmological_simulation", False):
             # this dataset is cosmological, add a cosmology object
-            setattr(self, "cosmology",
+            self.cosmology = \
                     Cosmology(hubble_constant=self.hubble_constant,
                               omega_matter=self.omega_matter,
                               omega_lambda=self.omega_lambda,
-                              unit_registry=self.unit_registry))
-            setattr(self, "critical_density",
-                    self.cosmology.critical_density(self.current_redshift))
+                              unit_registry=self.unit_registry)
+            self.critical_density = \
+                    self.cosmology.critical_density(self.current_redshift)
             self.scale_factor = 1.0 / (1.0 + self.current_redshift)
 
     def get_unit_from_registry(self, unit_str):

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -156,7 +156,7 @@
     def __iter__(self):
         # We can make this fancier, but this works
         for o in self._pre_outputs:
-            if isinstance(o, str):
+            if isinstance(o, string_types):
                 ds = load(o, **self.kwargs)
                 self._setup_function(ds)
                 yield ds
@@ -170,7 +170,7 @@
             # This will return a sliced up object!
             return DatasetSeries(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
-        if isinstance(o, str):
+        if isinstance(o, string_types):
             o = load(o, **self.kwargs)
             self._setup_function(o)
         return o
@@ -248,13 +248,31 @@
 
         """
         dynamic = False
-        if self.parallel == False:
+        if self.parallel is False:
             njobs = 1
         else:
-            if self.parallel == True: njobs = -1
-            else: njobs = self.parallel
-        return parallel_objects(self, njobs=njobs, storage=storage,
-                                dynamic=dynamic)
+            if self.parallel is True:
+                njobs = -1
+            else:
+                njobs = self.parallel
+
+        for output in parallel_objects(self._pre_outputs, njobs=njobs,
+                                       storage=storage, dynamic=dynamic):
+            if storage is not None:
+                sto, output = output
+
+            if isinstance(output, string_types):
+                ds = load(output, **self.kwargs)
+                self._setup_function(ds)
+            else:
+                ds = output
+
+            if storage is not None:
+                next_ret = (sto, ds)
+            else:
+                next_ret = ds
+
+            yield next_ret
 
     def eval(self, tasks, obj=None):
         tasks = ensure_list(tasks)
@@ -323,13 +341,13 @@
 
         """
         
-        if isinstance(filenames, str):
+        if isinstance(filenames, string_types):
             filenames = get_filenames_from_glob_pattern(filenames)
 
         # This will crash with a less informative error if filenames is not
         # iterable, but the plural keyword should give users a clue...
         for fn in filenames:
-            if not isinstance(fn, str):
+            if not isinstance(fn, string_types):
                 raise YTOutputNotIdentified("DataSeries accepts a list of "
                                             "strings, but "
                                             "received {0}".format(fn))

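A hedged sketch of the storage-aware loop that the rewritten ``piter`` above
enables (the glob pattern and the stored quantity are hypothetical):

    import yt

    ts = yt.DatasetSeries("DD????/DD????", parallel=True)
    storage = {}
    for sto, ds in ts.piter(storage=storage):
        # each iteration yields a storage slot and an already-loaded dataset
        sto.result_id = str(ds)
        sto.result = float(ds.current_time.in_units('Myr'))
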
diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -15,12 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import types
 import numpy as np
-import inspect
-import copy
-
-from yt.units.yt_array import YTArray
 
 from .derived_field import \
     ValidateParameter
@@ -29,8 +24,8 @@
     register_field_plugin
 
 from .vector_operations import \
-     create_magnitude_field
-    
+    create_magnitude_field
+
 from yt.utilities.lib.geometry_utils import \
     obtain_rvec, obtain_rv_vec
 
@@ -78,7 +73,7 @@
 
     create_magnitude_field(registry, "specific_angular_momentum",
                            "cm**2 / s", ftype=ftype)
-    
+
     def _angular_momentum_x(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_x"]
@@ -105,4 +100,3 @@
 
     create_magnitude_field(registry, "angular_momentum",
                            "g * cm**2 / s", ftype=ftype)
-                           

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -16,8 +16,7 @@
 import numpy as np
 
 from .derived_field import \
-    ValidateParameter, \
-    ValidateSpatial
+    ValidateParameter
 from .field_exceptions import \
     NeedsParameter
 from .field_plugin_registry import \
@@ -30,7 +29,7 @@
     clight, \
     kboltz, \
     G
-    
+
 @register_field_plugin
 def setup_astro_fields(registry, ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.
@@ -45,7 +44,7 @@
         div_fac = 2.0
     else:
         sl_left, sl_right, div_fac = slice_info
-    
+
     def _dynamical_time(field, data):
         """
         sqrt(3 pi / (16 G rho))
@@ -71,7 +70,7 @@
 
     def _chandra_emissivity(field, data):
         logT0 = np.log10(data[ftype, "temperature"].to_ndarray().astype(np.float64)) - 7
-        # we get rid of the units here since this is a fit and not an 
+        # we get rid of the units here since this is a fit and not an
         # analytical expression
         return data.ds.arr(data[ftype, "number_density"].to_ndarray().astype(np.float64)**2
                            * (10**(- 0.0103 * logT0**8 + 0.0417 * logT0**7
@@ -91,7 +90,7 @@
     registry.add_field((ftype, "chandra_emissivity"),
                        function=_chandra_emissivity,
                        units="") # add correct units here
-    
+
     def _xray_emissivity(field, data):
         # old scaling coefficient was 2.168e60
         return data.ds.arr(data[ftype, "density"].to_ndarray().astype(np.float64)**2
@@ -110,7 +109,7 @@
     registry.add_field((ftype,"mazzotta_weighting"),
                        function=_mazzotta_weighting,
                        units="keV**-0.25*cm**-6")
-    
+
     def _sz_kinetic(field, data):
         scale = 0.88 * sigma_thompson / mh / clight
         vel_axis = data.get_field_parameter("axis")

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/astro_simulations.py
--- a/yt/fields/astro_simulations.py
+++ b/yt/fields/astro_simulations.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 from .domain_context import DomainContext
 
 # Here's how this all works:

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -14,21 +14,17 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
+from .derived_field import \
+    ValidateParameter
+from .field_exceptions import \
+    NeedsConfiguration, \
+    NeedsParameter
+from .field_plugin_registry import \
+    register_field_plugin
 
-from .derived_field import \
-     ValidateParameter
-from .field_exceptions import \
-     NeedsConfiguration, \
-     NeedsParameter
-from .field_plugin_registry import \
-     register_field_plugin
+from yt.utilities.physical_constants import \
+    speed_of_light_cgs
 
-from yt.utilities.cosmology import \
-     Cosmology
-from yt.utilities.physical_constants import \
-     speed_of_light_cgs
-    
 @register_field_plugin
 def setup_cosmology_fields(registry, ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.
@@ -49,7 +45,7 @@
           data[ftype, "dark_matter_density"]
 
     registry.add_field((ftype, "matter_density"),
-                       function=_matter_density, 
+                       function=_matter_density,
                        units="g/cm**3")
 
     def _matter_mass(field, data):
@@ -67,7 +63,7 @@
         co = data.ds.cosmology
         return data[ftype, "matter_density"] / \
           co.critical_density(data.ds.current_redshift)
-    
+
     registry.add_field((ftype, "overdensity"),
                        function=_overdensity,
                        units="")
@@ -116,7 +112,7 @@
                        function=_virial_radius_fraction,
                        validators=[ValidateParameter("virial_radius")],
                        units="")
-    
+
     # Weak lensing convergence.
     # Eqn 4 of Metzler, White, & Loken (2001, ApJ, 547, 560).
     # This needs to be checked for accuracy.
@@ -127,7 +123,7 @@
         co = data.ds.cosmology
         observer_redshift = data.get_field_parameter('observer_redshift')
         source_redshift = data.get_field_parameter('source_redshift')
-        
+
         # observer to lens
         dl = co.angular_diameter_distance(observer_redshift, data.ds.current_redshift)
         # observer to source
@@ -135,11 +131,11 @@
         # lens to source
         dls = co.angular_diameter_distance(data.ds.current_redshift, source_redshift)
 
-        # removed the factor of 1 / a to account for the fact that we are projecting 
+        # removed the factor of 1 / a to account for the fact that we are projecting
         # with a proper distance.
         return (1.5 * (co.hubble_constant / speed_of_light_cgs)**2 * (dl * dls / ds) * \
           data[ftype, "matter_overdensity"]).in_units("1/cm")
-       
+
     registry.add_field((ftype, "weak_lensing_convergence"),
                        function=_weak_lensing_convergence,
                        units="1/cm",

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -16,10 +16,7 @@
 
 from yt.funcs import \
     ensure_list
-from yt.units.yt_array import \
-    YTArray
 from .field_exceptions import \
-    ValidationException, \
     NeedsGridType, \
     NeedsOriginalGrid, \
     NeedsDataField, \
@@ -30,15 +27,9 @@
     FieldDetector
 from yt.units.unit_object import \
     Unit
+from yt.utilities.exceptions import \
+    YTFieldNotFound
 
-def derived_field(**kwargs):
-    def inner_decorator(function):
-        if 'name' not in kwargs:
-            kwargs['name'] = function.__name__
-        kwargs['function'] = function
-        add_field(**kwargs)
-        return function
-    return inner_decorator
 
 def TranslationFunc(field_name):
     def _TranslationFunc(field, data):
@@ -48,7 +39,7 @@
 
 def NullFunc(field, data):
     raise YTFieldNotFound(field.name)
- 
+
 class DerivedField(object):
     """
     This is the base class used to describe a cell-by-cell derived field.
@@ -178,7 +169,7 @@
 
     def __call__(self, data):
         """ Return the value of the field in a given *data* object. """
-        ii = self.check_available(data)
+        self.check_available(data)
         original_fields = data.keys() # Copy
         if self._function is NullFunc:
             raise RuntimeError(

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/domain_context.py
--- a/yt/fields/domain_context.py
+++ b/yt/fields/domain_context.py
@@ -14,8 +14,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 domain_context_registry = {}
 
 class DomainContext(object):

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -15,16 +15,9 @@
 
 import numpy as np
 from collections import defaultdict
-from yt.units.unit_object import Unit
 from yt.units.yt_array import YTArray
 from .field_exceptions import \
-    ValidationException, \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter, \
-    FieldUnitsError
+    NeedsGridType
 
 class FieldDetector(defaultdict):
     Level = 1
@@ -87,27 +80,18 @@
         return arr.reshape(self.ActiveDimensions, order="C")
 
     def __missing__(self, item):
-        if hasattr(self.ds, "field_info"):
-            if not isinstance(item, tuple):
-                field = ("unknown", item)
-                finfo = self.ds._get_field_info(*field)
-                #mylog.debug("Guessing field %s is %s", item, finfo.name)
-            else:
-                field = item
-            finfo = self.ds._get_field_info(*field)
-            # For those cases where we are guessing the field type, we will
-            # need to re-update -- otherwise, our item will always not have the
-            # field type.  This can lead to, for instance, "unknown" particle
-            # types not getting correctly identified.
-            # Note that the *only* way this works is if we also fix our field
-            # dependencies during checking.  Bug #627 talks about this.
-            item = self.ds._last_freq
+        if not isinstance(item, tuple):
+            field = ("unknown", item)
         else:
-            FI = getattr(self.ds, "field_info", FieldInfo)
-            if item in FI:
-                finfo = FI[item]
-            else:
-                finfo = None
+            field = item
+        finfo = self.ds._get_field_info(*field)
+        # For those cases where we are guessing the field type, we will
+        # need to re-update -- otherwise, our item will always not have the
+        # field type.  This can lead to, for instance, "unknown" particle
+        # types not getting correctly identified.
+        # Note that the *only* way this works is if we also fix our field
+        # dependencies during checking.  Bug #627 talks about this.
+        item = self.ds._last_freq
         if finfo is not None and finfo._function.__name__ != 'NullFunc':
             try:
                 vv = finfo(self)
@@ -171,10 +155,7 @@
 
     def _read_data(self, field_name):
         self.requested.append(field_name)
-        if hasattr(self.ds, "field_info"):
-            finfo = self.ds._get_field_info(*field_name)
-        else:
-            finfo = FieldInfo[field_name]
+        finfo = self.ds._get_field_info(*field_name)
         if finfo.particle_type:
             self.requested.append(field_name)
             return np.ones(self.NumberOfParticles)

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/field_exceptions.py
--- a/yt/fields/field_exceptions.py
+++ b/yt/fields/field_exceptions.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 
 class ValidationException(Exception):
     pass

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -32,7 +32,7 @@
         # it from a cm**2 array.
         np.subtract(data["%s%s" % (field_prefix, ax)].in_units("cm"),
                     center[i], r)
-        if data.ds.periodicity[i] == True:
+        if data.ds.periodicity[i] is True:
             np.abs(r, r)
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/field_plugin_registry.py
--- a/yt/fields/field_plugin_registry.py
+++ b/yt/fields/field_plugin_registry.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 field_plugins = {}
 
 def register_field_plugin(func):

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -15,20 +15,16 @@
 
 import numpy as np
 
-from yt.funcs import \
-    just_one
-
 from .derived_field import \
-    ValidateParameter, \
     ValidateSpatial
 
 from .field_plugin_registry import \
     register_field_plugin
 
 from .vector_operations import \
-     create_averaged_field, \
-     create_magnitude_field, \
-     create_vector_fields
+    create_averaged_field, \
+    create_magnitude_field, \
+    create_vector_fields
 
 from yt.utilities.physical_constants import \
     mh, \
@@ -37,20 +33,6 @@
 from yt.utilities.physical_ratios import \
     metallicity_sun
 
-from yt.units.yt_array import \
-    YTArray
-
-from yt.utilities.math_utils import \
-    get_sph_r_component, \
-    get_sph_theta_component, \
-    get_sph_phi_component, \
-    get_cyl_r_component, \
-    get_cyl_z_component, \
-    get_cyl_theta_component, \
-    get_cyl_r, get_cyl_theta, \
-    get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi, \
-    periodic_dist, euclidean_dist
 
 @register_field_plugin
 def setup_fluid_fields(registry, ftype = "gas", slice_info = None):

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/fluid_vector_fields.py
--- a/yt/fields/fluid_vector_fields.py
+++ b/yt/fields/fluid_vector_fields.py
@@ -16,10 +16,7 @@
 import numpy as np
 
 from yt.fields.derived_field import \
-    ValidateGridType, \
-    ValidateParameter, \
-    ValidateSpatial, \
-    NeedsParameter
+    ValidateSpatial
 
 from .field_plugin_registry import \
     register_field_plugin
@@ -28,8 +25,8 @@
     just_one
 
 from .vector_operations import \
-     create_magnitude_field, \
-     create_squared_field
+    create_magnitude_field, \
+    create_squared_field
 
 @register_field_plugin
 def setup_fluid_vector_fields(registry, ftype = "gas", slice_info = None):

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/interpolated_fields.py
--- a/yt/fields/interpolated_fields.py
+++ b/yt/fields/interpolated_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from yt.fields.local_fields import add_field
 
 from yt.utilities.linear_interpolators import \

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/local_fields.py
--- a/yt/fields/local_fields.py
+++ b/yt/fields/local_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from yt.utilities.logger import \
     ytLogger as mylog
 

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -15,11 +15,6 @@
 
 import numpy as np
 
-from yt.units.yt_array import YTArray
-from yt.utilities.lib.misc_utilities import \
-    obtain_rvec, obtain_rv_vec
-from yt.utilities.math_utils import resize_vector
-from yt.utilities.cosmology import Cosmology
 from yt.fields.derived_field import \
     ValidateParameter
 
@@ -27,16 +22,8 @@
     register_field_plugin
 
 from yt.utilities.math_utils import \
-    get_sph_r_component, \
     get_sph_theta_component, \
-    get_sph_phi_component, \
-    get_cyl_r_component, \
-    get_cyl_z_component, \
-    get_cyl_theta_component, \
-    get_cyl_r, get_cyl_theta, \
-    get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi, \
-    periodic_dist, euclidean_dist
+    get_sph_phi_component
 
 @register_field_plugin
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/my_plugin_fields.py
--- a/yt/fields/my_plugin_fields.py
+++ b/yt/fields/my_plugin_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from .field_plugin_registry import \
     register_field_plugin
 

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -16,8 +16,6 @@
 
 import numpy as np
 
-from yt.funcs import *
-from yt.units.yt_array import YTArray
 from yt.fields.derived_field import \
     ValidateParameter, \
     ValidateSpatial
@@ -125,7 +123,7 @@
     def particle_density(field, data):
         pos = data[ptype, coord_name].convert_to_units("code_length")
         mass = data[ptype, mass_name].convert_to_units("code_mass")
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d = data.deposit(pos, [mass], method = "sum")
         d = data.ds.arr(d, "code_mass")
         d /= data["index", "cell_volume"]
         return d
@@ -790,13 +788,19 @@
         kwargs = {}
         if nneighbors:
             kwargs['nneighbors'] = nneighbors
+        # This is for applying cutoffs, similar to in the SPLASH paper.
+        smooth_cutoff = data["index","cell_volume"]**(1./3)
+        smooth_cutoff.convert_to_units("code_length")
         # volume_weighted smooth operations return lists of length 1.
         rv = data.smooth(pos, [mass, hsml, dens, quan],
                          method="volume_weighted",
                          create_octree=True,
+                         index_fields=[smooth_cutoff],
                          kernel_name=kernel_name)[0]
         rv[np.isnan(rv)] = 0.0
         # Now some quick unit conversions.
+        # This should be used when seeking a non-normalized value:
+        rv /= hsml.uq**3 / hsml.uq.in_cgs().uq**3
         rv = data.apply_units(rv, field_units)
         return rv
     registry.add_field(field_name, function = _vol_weight,
@@ -827,7 +831,7 @@
         field_name = (ptype, "smoothed_density")
     else:
         field_name = (ptype, "%s_smoothed_density" % (kernel_name))
-    field_units = registry[ptype, mass_name].units
+
     def _nth_neighbor(field, data):
         pos = data[ptype, coord_name]
         pos.convert_to_units("code_length")

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/setup.py
--- a/yt/fields/setup.py
+++ b/yt/fields/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -17,12 +17,10 @@
 import re
 
 from yt.utilities.physical_constants import \
-    mh, \
-    mass_sun_cgs, \
     amu_cgs
 from yt.utilities.physical_ratios import \
     primordial_H_mass_fraction
-from yt.funcs import *
+
 from yt.utilities.chemical_formulas import \
     ChemicalFormula
 from .field_plugin_registry import \

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -1,13 +1,20 @@
-from yt.testing import *
 import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_array_almost_equal_nulp, \
+    assert_array_equal, \
+    assert_raises
 from yt.utilities.cosmology import \
-     Cosmology
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    Cosmology
 from yt.frontends.stream.fields import \
     StreamFieldInfo
 from yt.units.yt_array import \
-     YTArray, YTQuantity
+    YTArray, YTQuantity
+from yt.utilities.exceptions import \
+    YTFieldUnitError, \
+    YTFieldUnitParseError
 
 def setup():
     global base_ds
@@ -88,19 +95,6 @@
         return field
     return field[1]
 
-def _expand_field(field):
-    if isinstance(field, tuple):
-        return field
-    if field in KnownStreamFields:
-        fi = KnownStreamFields[field]
-        if fi.particle_type:
-            return ("all", field)
-        else:
-            return ("gas", field)
-    # Otherwise, we just guess.
-    if "particle" in field:
-        return ("all", field)
-    return ("gas", field)
 
 class TestFieldAccess(object):
     description = None

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -178,7 +178,8 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return  1.0/(2**self.max_level)
+        return (self.dataset.domain_width /
+                (self.dataset.domain_dimensions * 2**(self.max_level))).min()
 
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]

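Rough arithmetic behind the corrected ``get_smallest_dx`` above, using made-up
numbers (a unit box, a 128**3 root grid, five levels of refinement):

    domain_width = 1.0
    domain_dimensions = 128
    max_level = 5
    # smallest cell size in code units: ~2.4e-4, rather than 1/2**max_level
    print(domain_width / (domain_dimensions * 2**max_level))
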
diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -185,27 +185,10 @@
             for alias in aliases:
                 self.alias((ptype, alias), (ptype, f), units = output_units)
 
-        # We'll either have particle_position or particle_position_[xyz]
-        if (ptype, "particle_position") in self.field_list or \
-           (ptype, "particle_position") in self.field_aliases:
-            particle_scalar_functions(ptype,
-                   "particle_position", "particle_velocity",
-                   self)
-        else:
-            # We need to check to make sure that there's a "known field" that
-            # overlaps with one of the vector fields.  For instance, if we are
-            # in the Stream frontend, and we have a set of scalar position
-            # fields, they will overlap with -- and be overridden by -- the
-            # "known" vector field that the frontend creates.  So the easiest
-            # thing to do is to simply remove the on-disk field (which doesn't
-            # exist) and replace it with a derived field.
-            if (ptype, "particle_position") in self and \
-                 self[ptype, "particle_position"]._function == NullFunc:
-                self.pop((ptype, "particle_position"))
-            particle_vector_functions(ptype,
-                    ["particle_position_%s" % ax for ax in 'xyz'],
-                    ["particle_velocity_%s" % ax for ax in 'xyz'],
-                    self)
+        ppos_fields = ["particle_position_%s" % ax for ax in 'xyz']
+        pvel_fields = ["particle_velocity_%s" % ax for ax in 'xyz']
+        particle_vector_functions(ptype, ppos_fields, pvel_fields, self)
+
         particle_deposition_functions(ptype, "particle_position",
             "particle_mass", self)
         standard_particle_fields(self, ptype)
@@ -219,7 +202,7 @@
             self.add_output_field(field, 
                                   units = self.ds.field_units.get(field, ""),
                                   particle_type = True)
-        self.setup_smoothed_fields(ptype, 
+        self.setup_smoothed_fields(ptype,
                                    num_neighbors=num_neighbors,
                                    ftype=ftype)
 

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -77,7 +77,7 @@
 
     def _set_units(self):
         self.unit_registry = UnitRegistry()
-        self.unit_registry.lut["code_time"] = (1.0, dimensions.time)
+        self.unit_registry.add("code_time", 1.0, dimensions.time)
         if self.cosmological_simulation:
             # Instantiate EnzoCosmology object for units and time conversions.
             self.cosmology = \

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -420,7 +420,7 @@
         Generates the conversion to various physical _units based on the parameter file
         """
         default_length_units = [u for u,v in default_unit_symbol_lut.items()
-                                if str(v[-1]) == "(length)"]
+                                if str(v[1]) == "(length)"]
         more_length_units = []
         for unit in default_length_units:
             if unit in prefixable_units:

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-Gadget frontend tests using the IsothermalCollapse dataset
+Gadget frontend tests
 
 
 
@@ -14,15 +14,49 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import \
+    data_dir_load, \
     requires_ds, \
-    data_dir_load
-from yt.frontends.gadget.api import GadgetHDF5Dataset
+    sph_answer
+from yt.frontends.gadget.api import GadgetHDF5Dataset, GadgetDataset
 
-isothermal = "IsothermalCollapse/snap_505.hdf5"
-@requires_file(isothermal)
+isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
+isothermal_bin = "IsothermalCollapse/snap_505"
+gdg = "GadgetDiskGalaxy/snapshot_0200.hdf5"
+
+iso_fields = (
+    ("gas", "density"),
+    ("gas", "temperature"),
+    ('gas', 'velocity_magnitude'),
+    ("deposit", "all_density"),
+    ("deposit", "all_count"),
+    ("deposit", "all_cic"),
+    ("deposit", "PartType0_density"),
+)
+iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
+
+gdg_fields = iso_fields + (("deposit", "PartType4_density"), )
+gdg_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
+
+
+@requires_file(isothermal_h5)
+@requires_file(isothermal_bin)
 def test_GadgetDataset():
-    kwargs = dict(bounding_box=[[-3,3], [-3,3], [-3,3]])
-    assert isinstance(data_dir_load(isothermal, kwargs=kwargs),
+    assert isinstance(data_dir_load(isothermal_h5, kwargs=iso_kwargs),
                       GadgetHDF5Dataset)
+    assert isinstance(data_dir_load(isothermal_bin, kwargs=iso_kwargs),
+                      GadgetDataset)
+
+
+@requires_ds(isothermal_h5)
+def test_iso_collapse():
+    for test in sph_answer(isothermal_h5, 'snap_505', 2**17,
+                           iso_fields, ds_kwargs=iso_kwargs):
+        yield test
+
+@requires_ds(gdg, big_data=True)
+def test_gadget_disk_galaxy():
+    for test in sph_answer(gdg, 'snap_505', 11907080, gdg_fields,
+                           ds_kwargs=gdg_kwargs):
+        yield test

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/owls/tests/test_outputs.py
--- a/yt/frontends/owls/tests/test_outputs.py
+++ b/yt/frontends/owls/tests/test_outputs.py
@@ -14,45 +14,33 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import \
+    requires_file
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
-    small_patch_amr, \
-    big_patch_amr, \
     data_dir_load, \
-    PixelizedProjectionValuesTest, \
-    FieldValuesTest, \
-    create_obj
+    sph_answer
 from yt.frontends.owls.api import OWLSDataset
 
-_fields = (("deposit", "all_density"), ("deposit", "all_count"),
-           ("deposit", "PartType0_density"),
-           ("deposit", "PartType4_density"))
+os33 = "snapshot_033/snap_033.0.hdf5"
 
-os33 = "snapshot_033/snap_033.0.hdf5"
+_fields = (
+    ("gas", "density"),
+    ("gas", "temperature"),
+    ('gas', 'He_p0_number_density'),
+    ('gas', 'N_p1_number_density'),
+    ('gas', 'velocity_magnitude'),
+    ("deposit", "all_density"),
+    ("deposit", "all_count"),
+    ("deposit", "all_cic"),
+    ("deposit", "PartType0_density"),
+    ("deposit", "PartType4_density"))
+
+
 @requires_ds(os33, big_data=True)
 def test_snapshot_033():
-    ds = data_dir_load(os33)
-    yield assert_equal, str(ds), "snap_033"
-    dso = [ None, ("sphere", ("c", (0.1, 'unitary')))]
-    dd = ds.all_data()
-    yield assert_equal, dd["particle_position"].shape[0], 2*(128*128*128)
-    yield assert_equal, dd["particle_position"].shape[1], 3
-    tot = sum(dd[ptype,"particle_position"].shape[0]
-              for ptype in ds.particle_types if ptype != "all")
-    yield assert_equal, tot, (2*128*128*128)
-    for dobj_name in dso:
-        for field in _fields:
-            for axis in [0, 1, 2]:
-                for weight_field in [None, "density"]:
-                    yield PixelizedProjectionValuesTest(
-                        os33, axis, field, weight_field,
-                        dobj_name)
-            yield FieldValuesTest(os33, field, dobj_name)
-        dobj = create_obj(ds, dobj_name)
-        s1 = dobj["ones"].sum()
-        s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+    for test in sph_answer(os33, 'snap_033', 2*128**3, _fields):
+        yield test
 
 
 @requires_file(os33)

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -19,7 +19,7 @@
 import unittest
 
 from yt.testing import assert_raises
-from yt.utilities.answer_testing.framework import data_dir_load
+from yt.convenience import load
 from yt.utilities.exceptions import YTOutputNotIdentified
 
 class TestEmptyLoad(unittest.TestCase):
@@ -40,6 +40,6 @@
         shutil.rmtree(self.tmpdir)
 
     def test_load_empty_file(self):
-        assert_raises(YTOutputNotIdentified, data_dir_load, "not_a_file")
-        assert_raises(YTOutputNotIdentified, data_dir_load, "empty_file")
-        assert_raises(YTOutputNotIdentified, data_dir_load, "empty_directory")
+        assert_raises(YTOutputNotIdentified, load, "not_a_file")
+        assert_raises(YTOutputNotIdentified, load, "empty_file")
+        assert_raises(YTOutputNotIdentified, load, "empty_directory")

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -32,6 +32,7 @@
 from yt.utilities.physical_constants import \
     G, \
     cm_per_kpc
+from yt import YTQuantity
 
 from .fields import \
     TipsyFieldInfo
@@ -167,9 +168,9 @@
         self.domain_dimensions = np.ones(3, "int32") * nz
         periodic = self.parameters.get('bPeriodic', True)
         period = self.parameters.get('dPeriod', None)
-        comoving = self.parameters.get('bComove', False)
         self.periodicity = (periodic, periodic, periodic)
-        if comoving and period is None:
+        self.comoving = self.parameters.get('bComove', False)
+        if self.comoving and period is None:
             period = 1.0
         if self.bounding_box is None:
             if periodic and period is not None:
@@ -186,7 +187,9 @@
             self.domain_left_edge = bbox[:,0]
             self.domain_right_edge = bbox[:,1]
 
-        if comoving:
+        # If the cosmology parameters dictionary was set when the data was
+        # loaded, we can assume this is a cosmological data set
+        if self.comoving or self._cosmology_parameters is not None:
             cosm = self._cosmology_parameters or {}
             self.scale_factor = hvals["time"]#In comoving simulations, time stores the scale factor a
             self.cosmological_simulation = 1
@@ -224,8 +227,15 @@
             self.length_unit = self.quan(lu, 'kpc')*self.scale_factor
             self.mass_unit = self.quan(mu, 'Msun')
             density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
-            # Gasoline's hubble constant, dHubble0, is stored units of proper code time.
-            self.hubble_constant *= np.sqrt(G.in_units('kpc**3*Msun**-1*s**-2')*density_unit).value/(3.2407793e-18)
+
+            # If self.comoving is set, we know this is a gasoline data set,
+            # and we do the conversion on the hubble constant.
+            if self.comoving:
+                # Gasoline's hubble constant, dHubble0, is stored in units of
+                # proper code time.
+                self.hubble_constant *= np.sqrt(G.in_units(
+                    'kpc**3*Msun**-1*s**-2') * density_unit).value / (
+                    3.2407793e-18)
             cosmo = Cosmology(self.hubble_constant,
                               self.omega_matter, self.omega_lambda)
             self.current_time = cosmo.hubble_time(self.current_redshift)
@@ -237,6 +247,24 @@
             density_unit = self.mass_unit / self.length_unit**3
         self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
+        # If unit base is defined by the user, override all relevant units
+        if self._unit_base is not None:
+            length = self._unit_base.get('length', self.length_unit)
+            length = self.quan(*length) if isinstance(length, tuple) else self.quan(length)
+            self.length_unit = length
+
+            mass = self._unit_base.get('mass', self.mass_unit)
+            mass = self.quan(*mass) if isinstance(mass, tuple) else self.quan(mass)
+            self.mass_unit = mass
+
+            density_unit = self.mass_unit / self.length_unit**3
+            self.time_unit = 1.0 / np.sqrt(G * density_unit)
+
+            time = self._unit_base.get('time', self.time_unit)
+            time = self.quan(*time) if isinstance(time, tuple) else self.quan(time)
+            self.time_unit = time
+
+
     @staticmethod
     def _validate_header(filename):
         '''
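The _unit_base handling added above accepts either a bare value or a (value, unit) tuple for each of 'length', 'mass', and 'time'; anything not supplied keeps the value derived from the snapshot, and the time unit is recomputed from the overridden length and mass before any user-supplied time replaces it. A usage sketch based on the kwargs in the gasoline test below (the commented-out mass entry is purely illustrative):

    import yt

    ds = yt.load(
        "agora_1e11.00400/agora_1e11.00400",
        unit_base={
            "length": (1.0 / 60.0, "Mpccm/h"),
            # "mass": (1.0e12, "Msun"),  # hypothetical tuple-form override
        },
        n_ref=64,
    )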

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/tipsy/fields.py
--- a/yt/frontends/tipsy/fields.py
+++ b/yt/frontends/tipsy/fields.py
@@ -38,7 +38,8 @@
         'FeMassFrac':("FeMassFrac", ("dimensionless", ["Fe_fraction"], None)),
         'c':("c", ("code_velocity", [""], None)),
         'acc':("acc", ("code_velocity / code_time", [""], None)),
-        'accg':("accg", ("code_velocity / code_time", [""], None))}
+        'accg':("accg", ("code_velocity / code_time", [""], None)),
+        'smoothlength':('smoothlength', ("code_length", ["smoothing_length"], None))}
 
     def __init__(self, ds, field_list, slice_info = None):
         for field in field_list:
@@ -60,15 +61,19 @@
 
     def setup_gas_particle_fields(self, ptype):
 
-        def _smoothing_length(field, data):
-            # For now, we hardcode num_neighbors.  We should make this configurable
-            # in the future.
-            num_neighbors = 64
-            fn, = add_nearest_neighbor_field(ptype, "particle_position", self, num_neighbors)
-            return data[ptype, 'nearest_neighbor_distance_%d' % num_neighbors]
+        num_neighbors = 65
+        fn, = add_nearest_neighbor_field(ptype, "particle_position", self, num_neighbors)
+        def _func():
+            def _smoothing_length(field, data):
+                # For now, we hardcode num_neighbors.  We should make this configurable
+                # in the future.
+                rv = data[ptype, 'nearest_neighbor_distance_%d' % num_neighbors]
+                #np.maximum(rv, 0.5*data[ptype, "Epsilon"], rv)
+                return rv
+            return _smoothing_length
 
         self.add_field(
             (ptype, "smoothing_length"),
-            function=_smoothing_length,
+            function=_func(),
             particle_type=True,
             units="code_length")
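For context on the rewrite above: the nearest-neighbor field is now registered once, when the gas particle fields are set up, instead of inside the field function on every evaluation, and the smoothing-length function itself is produced by a small factory. A generic sketch of that factory pattern, with made-up names, just to show its shape:

    # Hypothetical helper, not part of yt: build a field function with its own
    # bound copies of ptype and num_neighbors.
    def make_smoothing_length(ptype, num_neighbors):
        def _smoothing_length(field, data):
            # assumes the nearest-neighbor distance field was registered beforehand
            return data[ptype, "nearest_neighbor_distance_%d" % num_neighbors]
        return _smoothing_length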

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/frontends/tipsy/tests/test_outputs.py
--- a/yt/frontends/tipsy/tests/test_outputs.py
+++ b/yt/frontends/tipsy/tests/test_outputs.py
@@ -14,15 +14,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import assert_equal, requires_file
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
-    small_patch_amr, \
-    big_patch_amr, \
     data_dir_load, \
-    PixelizedProjectionValuesTest, \
+    sph_answer, \
+    create_obj, \
     FieldValuesTest, \
-    create_obj
+    PixelizedProjectionValuesTest
 from yt.frontends.tipsy.api import TipsyDataset
 
 _fields = (("deposit", "all_density"),
@@ -62,9 +61,9 @@
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
 
-gasoline = "agora_1e11.00400/agora_1e11.00400"
-@requires_ds(gasoline, big_data = True, file_check = True)
-def test_gasoline():
+gasoline_dmonly = "agora_1e11.00400/agora_1e11.00400"
+@requires_ds(gasoline_dmonly, big_data = True, file_check = True)
+def test_gasoline_dmonly():
     cosmology_parameters = dict(current_redshift = 0.0,
                                 omega_lambda = 0.728,
                                 omega_matter = 0.272,
@@ -72,7 +71,7 @@
     kwargs = dict(cosmology_parameters = cosmology_parameters,
                   unit_base = {'length': (1.0/60.0, "Mpccm/h")},
                   n_ref = 64)
-    ds = data_dir_load(gasoline, TipsyDataset, (), kwargs)
+    ds = data_dir_load(gasoline_dmonly, TipsyDataset, (), kwargs)
     yield assert_equal, str(ds), "agora_1e11.00400"
     dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
     dd = ds.all_data()
@@ -93,7 +92,22 @@
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
 
+tg_fields = (
+    ('gas', 'density'),
+    ('gas', 'temperature'),
+    ('gas', 'velocity_magnitude'),
+    ('gas', 'Fe_fraction'),
+    ('Stars', 'Metals'),
+)
 
+tipsy_gal = 'TipsyGalaxy/galaxy.00300'
+@requires_ds(tipsy_gal)
+def test_tipsy_galaxy():
+    for test in sph_answer(tipsy_gal, 'galaxy.00300', 315372, tg_fields):
+        yield test
+        
+@requires_file(gasoline_dmonly)
 @requires_file(pkdgrav)
 def test_TipsyDataset():
     assert isinstance(data_dir_load(pkdgrav), TipsyDataset)
+    assert isinstance(data_dir_load(gasoline_dmonly), TipsyDataset)

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -40,13 +40,14 @@
 
 cdef inline np.float64_t sph_kernel_cubic(np.float64_t x) nogil:
     cdef np.float64_t kernel
+    cdef np.float64_t C = 2.5464790894703255
     if x <= 0.5:
         kernel = 1.-6.*x*x*(1.-x)
     elif x>0.5 and x<=1.0:
         kernel = 2.*(1.-x)*(1.-x)*(1.-x)
     else:
         kernel = 0.
-    return kernel
+    return kernel * C
 
 ########################################################
 # Alternative SPH kernels for use with the Grid method #
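The constant added to the cubic kernel above, 2.5464790894703255, is 8/pi, which looks like the 3D normalization of the M4 cubic spline (the 1/h**3 factor presumably being applied where the kernel is evaluated against a smoothing length). A quick numerical check that the normalized kernel integrates to one over the unit sphere:

    import numpy as np

    def sph_kernel_cubic(x):
        # Python mirror of the Cython kernel above, including the 8/pi factor.
        C = 2.5464790894703255  # 8 / pi
        if x <= 0.5:
            kernel = 1. - 6. * x * x * (1. - x)
        elif x <= 1.0:
            kernel = 2. * (1. - x) ** 3
        else:
            kernel = 0.
        return kernel * C

    q = np.linspace(0.0, 1.0, 100001)
    w = np.array([sph_kernel_cubic(qi) for qi in q])
    print(np.trapz(4.0 * np.pi * q ** 2 * w, q))  # ~1.0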

diff -r e5b0df989a8378ebe70fc27e193a1fe93664f8f4 -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -55,10 +55,12 @@
                                np.int64_t *pinds, np.int64_t *pcounts,
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize)
+                               int *nsize, np.float64_t *oct_left_edges,
+                               np.float64_t *oct_dds)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
-                             np.int64_t nneighbors, np.int64_t domain_id, Oct **oct = ?)
+                             np.int64_t nneighbors, np.int64_t domain_id, 
+                             Oct **oct = ?, int extra_layer = ?)
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
                                np.float64_t *ppos,
                                np.float64_t **fields, 
@@ -78,7 +80,9 @@
                             np.int64_t *pcounts,
                             np.int64_t *pinds,
                             np.float64_t *ppos,
-                            np.float64_t cpos[3])
+                            np.float64_t cpos[3],
+                            np.float64_t* oct_left_edges,
+                            np.float64_t* oct_dds)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/076a50c91071/
Changeset:   076a50c91071
Branch:      yt
User:        xarthisius
Date:        2015-09-22 21:42:26+00:00
Summary:     Fix offset calculation for ascii aux files
Affected #:  1 file

diff -r 6a44fa11e59fa722b5cfd1cc13536f154325ce08 -r 076a50c910710d2067022f0ef64efd2a6efab9e7 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -382,8 +382,8 @@
                     pos += data_file.total_particles[ptype] * size
             else:
                 aux_fields_offsets[afield].update(
-                    {'DarkMatter': (1, tp["Gas"] + tp["Stars"]),
-                     'Gas': (1 + tp["DarkMatter"], tp["Stars"]),
+                    {'Gas': (1, tp["DarkMatter"] + tp["Stars"]),
+                     'DarkMatter': (1 + tp["Gas"], tp["Stars"]),
                      'Stars': (1 + tp["DarkMatter"] + tp["Gas"], 0)}
                 )
         return aux_fields_offsets
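For reference on the fix above: an ascii Tipsy aux file carries a single header line with the total particle count, and the per-type values then follow in the snapshot's on-disk order, gas first, then dark matter, then stars; each offsets entry appears to be read as (lines to skip before the block, particles remaining after it), so the previous dictionary had the Gas and DarkMatter entries swapped. A minimal sketch with illustrative counts:

    # Illustrative counts only; tp mirrors data_file.total_particles.
    tp = {"Gas": 1000, "DarkMatter": 2000, "Stars": 500}

    aux_fields_offsets = {
        # (lines to skip before this type's block, particles after it);
        # the leading 1 skips the header line holding the total count.
        "Gas":        (1,                                tp["DarkMatter"] + tp["Stars"]),
        "DarkMatter": (1 + tp["Gas"],                    tp["Stars"]),
        "Stars":      (1 + tp["Gas"] + tp["DarkMatter"], 0),
    }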

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


