[yt-svn] commit/yt-3.0: 9 new changesets

commits-noreply at bitbucket.org
Thu Jul 11 06:58:49 PDT 2013


9 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/9cdb65cd9b41/
Changeset:   9cdb65cd9b41
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 20:04:50
Summary:     Initial implementation of ParticleFilters.
Affected #:  2 files

diff -r aba44265d752191a46e8a6a7a6bb86385ad4938d -r 9cdb65cd9b41b2e87cf01b53648eb119c321e675 yt/data_objects/particle_filters.py
--- /dev/null
+++ b/yt/data_objects/particle_filters.py
@@ -0,0 +1,81 @@
+"""
+This is a library for defining and using particle filters.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from contextlib import contextmanager
+
+from yt.utilities.exceptions import YTIllDefinedFilter
+from yt.funcs import *
+
+# One to many mapping
+filter_registry = defaultdict(list)
+
+class ParticleFilter(object):
+    def __init__(self, name, function, requires):
+        self.name = name
+        self.function = function
+        self.requires = requires[:]
+        pt = []
+        for r in requires:
+            if not isinstance(r, types.TupleType):
+                raise RuntimeError
+            if r[0] not in pt:
+                pt.append(r[0])
+        if len(pt) > 1:
+            raise RuntimeError
+        self.particle_type = pt[0]
+
+    @contextmanager
+    def apply(self, dobj):
+        with dobj._chunked_read(dobj._current_chunk):
+            # We won't be storing the field data from the whole read, so we
+            # start by filtering now.
+            filter = self.function(self, dobj)
+            yield
+            # Retain a reference here, and we'll filter all appropriate fields
+            # later.
+            fd = dobj.field_data
+        for f, tr in fd.items():
+            if f[0] != self.particle_type: continue
+            if tr.shape != filter.shape:
+                raise YTIllDefinedFilter(self, tr.shape, filter.shape)
+            dobj.field_data[self.name, f[1]] = tr[filter]
+
+    def available(self, field_list):
+        # Note that this assumes that all the fields in field_list have the
+        # same form as the 'requires' attributes.  This won't be true if the
+        # fields are implicitly "all" or something.
+        return all(field in field_list for field in self.requires)
+
+def add_particle_filter(name, function, requires = None):
+    if requires is None: requires = []
+    filter = ParticleFilter(name, function, requires)
+    filter_registry[name].append(filter)
+
+def particle_filter(name, requires = None):
+    def _pfilter(func):
+        add_particle_filter(name, func, requires)
+        return func
+    return _pfilter

diff -r aba44265d752191a46e8a6a7a6bb86385ad4938d -r 9cdb65cd9b41b2e87cf01b53648eb119c321e675 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -277,3 +277,13 @@
     def __str__(self):
         return "Particle bounds %s and %s exceed domain bounds %s and %s" % (
             self.mi, self.ma, self.dle, self.dre)
+
+class YTIllDefinedFilter(YTException):
+    def __init__(self, filter, s1, s2):
+        self.filter = filter
+        self.s1 = s1
+        self.s2 = s2
+
+    def __str__(self):
+        return "Filter '%s' ill-defined.  Applied to shape %s but is shape %s." % (
+            self.filter, self.s1, self.s2)

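A quick illustration of the decorator form introduced above (the filter and field names here are hypothetical, not part of the changeset): the wrapped function receives the ParticleFilter instance and a data object and returns a boolean mask, while the requires entries are (particle_type, field_name) tuples whose shared type becomes the filter's particle_type.

    from yt.data_objects.particle_filters import particle_filter

    # Hypothetical filter: selects particles of type "all" with a positive
    # creation_time field.  The mask must be 1D over particles of that type.
    @particle_filter("young_stars", requires=[("all", "creation_time")])
    def _young_stars(pfilter, data):
        return data["all", "creation_time"] > 0.0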

https://bitbucket.org/yt_analysis/yt-3.0/commits/996606f015ee/
Changeset:   996606f015ee
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 20:26:35
Summary:     Make filtered_type explicit.
Affected #:  1 file

diff -r 9cdb65cd9b41b2e87cf01b53648eb119c321e675 -r 996606f015ee15221e2787900793c74c3ed73a2d yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -32,31 +32,28 @@
 # One to many mapping
 filter_registry = defaultdict(list)
 
+class DummyFieldInfo(object):
+    particle_type = True
+dfi = DummyFieldInfo()
+
 class ParticleFilter(object):
-    def __init__(self, name, function, requires):
+    def __init__(self, name, function, requires, filtered_type):
         self.name = name
         self.function = function
         self.requires = requires[:]
-        pt = []
-        for r in requires:
-            if not isinstance(r, types.TupleType):
-                raise RuntimeError
-            if r[0] not in pt:
-                pt.append(r[0])
-        if len(pt) > 1:
-            raise RuntimeError
-        self.particle_type = pt[0]
+        self.particle_type = filtered_type
 
     @contextmanager
     def apply(self, dobj):
         with dobj._chunked_read(dobj._current_chunk):
-            # We won't be storing the field data from the whole read, so we
-            # start by filtering now.
-            filter = self.function(self, dobj)
-            yield
-            # Retain a reference here, and we'll filter all appropriate fields
-            # later.
-            fd = dobj.field_data
+            with dobj._field_type_state(self.particle_type, dfi):
+                # We won't be storing the field data from the whole read, so we
+                # start by filtering now.
+                filter = self.function(self, dobj)
+                yield
+                # Retain a reference here, and we'll filter all appropriate fields
+                # later.
+                fd = dobj.field_data
         for f, tr in fd.items():
             if f[0] != self.particle_type: continue
             if tr.shape != filter.shape:
@@ -69,13 +66,13 @@
         # fields are implicitly "all" or something.
         return all(field in field_list for field in self.requires)
 
-def add_particle_filter(name, function, requires = None):
+def add_particle_filter(name, function, requires = None, filtered_type = "all"):
     if requires is None: requires = []
-    filter = ParticleFilter(name, function, requires)
+    filter = ParticleFilter(name, function, requires, filtered_type)
     filter_registry[name].append(filter)
 
-def particle_filter(name, requires = None):
+def particle_filter(name, requires = None, filtered_type = "all"):
     def _pfilter(func):
-        add_particle_filter(name, func, requires)
+        add_particle_filter(name, func, requires, filtered_type)
         return func
     return _pfilter

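With filtered_type now explicit, registration no longer infers the source type from the requires tuples.  A hypothetical call using the function form (names illustrative):

    from yt.data_objects.particle_filters import add_particle_filter

    def _stars(pfilter, data):
        # Illustrative criterion over particles of the filtered type.
        return data["all", "particle_type"] == 2

    add_particle_filter("stars", _stars,
                        requires=[("all", "particle_type")],
                        filtered_type="all")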

https://bitbucket.org/yt_analysis/yt-3.0/commits/fc78b0a80896/
Changeset:   fc78b0a80896
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 21:38:14
Summary:     First draft of working particle filter implementation.
Affected #:  4 files

diff -r 996606f015ee15221e2787900793c74c3ed73a2d -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -467,7 +467,15 @@
         if self._current_chunk is None:
             self.hierarchy._identify_base_chunk(self)
         if fields is None: return
-        fields = self._determine_fields(fields)
+        nfields = []
+        for field in self._determine_fields(fields):
+            if field[0] in self.pf.h.filtered_particle_types:
+                f = self.pf.known_filters[field[0]]
+                with f.apply(self):
+                    self.get_data([(f.filtered_type, field[1])])
+            else:
+                nfields.append(field)
+        fields = nfields
         # Now we collect all our fields
         # Here is where we need to perform a validation step, so that if we
         # have a field requested that we actually *can't* yet get, we put it

diff -r 996606f015ee15221e2787900793c74c3ed73a2d -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -24,8 +24,11 @@
 """
 
 import numpy as np
+import copy
 from contextlib import contextmanager
 
+from yt.data_objects.field_info_container import \
+    NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
 from yt.funcs import *
 
@@ -41,12 +44,12 @@
         self.name = name
         self.function = function
         self.requires = requires[:]
-        self.particle_type = filtered_type
+        self.filtered_type = filtered_type
 
     @contextmanager
     def apply(self, dobj):
         with dobj._chunked_read(dobj._current_chunk):
-            with dobj._field_type_state(self.particle_type, dfi):
+            with dobj._field_type_state(self.filtered_type, dfi):
                 # We won't be storing the field data from the whole read, so we
                 # start by filtering now.
                 filter = self.function(self, dobj)
@@ -55,7 +58,7 @@
                 # later.
                 fd = dobj.field_data
         for f, tr in fd.items():
-            if f[0] != self.particle_type: continue
+            if f[0] != self.filtered_type: continue
             if tr.shape != filter.shape:
                 raise YTIllDefinedFilter(self, tr.shape, filter.shape)
             dobj.field_data[self.name, f[1]] = tr[filter]
@@ -64,7 +67,12 @@
         # Note that this assumes that all the fields in field_list have the
         # same form as the 'requires' attributes.  This won't be true if the
         # fields are implicitly "all" or something.
-        return all(field in field_list for field in self.requires)
+        return all((self.filtered_type, field) in field_list for field in self.requires)
+
+    def wrap_func(self, field_name, old_fi):
+        new_fi = copy.copy(old_fi)
+        new_fi.name = (self.filtered_type, field_name[1])
+        return new_fi
 
 def add_particle_filter(name, function, requires = None, filtered_type = "all"):
     if requires is None: requires = []

diff -r 996606f015ee15221e2787900793c74c3ed73a2d -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -88,6 +88,7 @@
         self.file_style = file_style
         self.conversion_factors = {}
         self.parameters = {}
+        self.known_filters = {}
 
         # path stuff
         self.parameter_filename = str(filename)
@@ -249,6 +250,9 @@
         else:
             raise YTGeometryNotSupported(self.geometry)
 
+    def add_particle_filter(self, filter):
+        self.known_filters[filter.name] = filter
+
     _last_freq = (None, None)
     _last_finfo = None
     def _get_field_info(self, ftype, fname):

diff -r 996606f015ee15221e2787900793c74c3ed73a2d -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -162,8 +162,35 @@
                 self.parameter_file.field_info[field] = known_fields[field]
 
     def _setup_derived_fields(self):
+        self.derived_field_list = []
+        self.filtered_particle_types = []
+        fc, fac = self._derived_fields_to_check()
+        self._derived_fields_add(fc, fac)
+        # Now we do a special case for all filters.
+        kf = self.parameter_file.known_filters
         fi = self.parameter_file.field_info
-        self.derived_field_list = []
+        fd = self.parameter_file.field_dependencies
+        for filter_name in kf:
+            filter = kf[filter_name]
+            if not filter.available(self.derived_field_list):
+                continue
+            # Only fields whose dependencies have been reached get added here.
+            available = False
+            for fn in self.derived_field_list:
+                if fn[0] == filter.filtered_type:
+                    # Now we can add this
+                    available = True
+                    self.derived_field_list.append(
+                        (filter.name, fn[1]))
+                    fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
+                    # Now we append the dependencies
+                    fd[filter.name, fn[1]] = fd[fn]
+            if available:
+                self.parameter_file.particle_types += (filter_name,)
+                self.filtered_particle_types.append(filter_name)
+
+    def _derived_fields_to_check(self):
+        fi = self.parameter_file.field_info
         # First we construct our list of fields to check
         fields_to_check = []
         fields_to_allcheck = []
@@ -187,6 +214,15 @@
                 new_fields.append(new_fi.name)
             fields_to_check += new_fields
             fields_to_allcheck.append(field)
+        return fields_to_check, fields_to_allcheck
+
+    def _derived_fields_add(self, fields_to_check = None,
+                            fields_to_allcheck = None):
+        if fields_to_check is None:
+            fields_to_check = []
+        if fields_to_allcheck is None:
+            fields_to_allcheck = []
+        fi = self.parameter_file.field_info
         for field in fields_to_check:
             try:
                 fd = fi[field].get_dependencies(pf = self.parameter_file)

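A hedged end-to-end sketch of this draft (dataset, filter, and field names are all illustrative): the filter has to be known to the parameter file before the hierarchy builds its derived field list, after which fields of the filtered type can be requested from any data container; get_data() routes them through ParticleFilter.apply() and reads the underlying filtered_type fields.

    from yt.mods import load                      # convenience import assumed
    from yt.data_objects.particle_filters import filter_registry

    pf = load("my_dataset")                       # illustrative dataset
    # Assuming the "young_stars" filter from the earlier sketch is registered.
    pf.add_particle_filter(filter_registry["young_stars"][0])
    dd = pf.h.all_data()
    # Stored as ("young_stars", "particle_mass"): a masked copy of the
    # ("all", "particle_mass") data.
    young_mass = dd["young_stars", "particle_mass"]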

https://bitbucket.org/yt_analysis/yt-3.0/commits/75aa65ad91ac/
Changeset:   75aa65ad91ac
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 22:59:23
Summary:     Consolidate processing for filtered particles.  Fix filtering of 0-size arrays.

Also, add returning of newly added registry fields, which will help facilitate
setting up dependencies for dynamically added particle types.
Affected #:  3 files

diff -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 -r 75aa65ad91ac63409fb9cea0bc0c1af8f2c24d94 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -468,14 +468,20 @@
             self.hierarchy._identify_base_chunk(self)
         if fields is None: return
         nfields = []
+        apply_fields = defaultdict(list)
         for field in self._determine_fields(fields):
             if field[0] in self.pf.h.filtered_particle_types:
                 f = self.pf.known_filters[field[0]]
-                with f.apply(self):
-                    self.get_data([(f.filtered_type, field[1])])
+                apply_fields[field[0]].append(
+                    (f.filtered_type, field[1]))
             else:
                 nfields.append(field)
+        for filter_type in apply_fields:
+            f = self.pf.known_filters[filter_type]
+            with f.apply(self):
+                self.get_data(apply_fields[filter_type])
         fields = nfields
+        if len(fields) == 0: return
         # Now we collect all our fields
         # Here is where we need to perform a validation step, so that if we
         # have a field requested that we actually *can't* yet get, we put it

diff -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 -r 75aa65ad91ac63409fb9cea0bc0c1af8f2c24d94 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -42,6 +42,7 @@
     mh
 
 def particle_deposition_functions(ptype, coord_name, mass_name, registry):
+    orig = set(registry.keys())
     def particle_count(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
@@ -112,6 +113,9 @@
             particle_type = True,
             units = r"\mathrm{M}_\odot")
 
+    return list(set(registry.keys()).difference(orig))
+
+
 def particle_scalar_functions(ptype, coord_name, vel_name, registry):
 
     # Now we have to set up the various velocity and coordinate things.  In the
@@ -119,6 +123,8 @@
     # elsewhere, and stop using these.
     
     # Note that we pass in _ptype here so that it's defined inside the closure.
+    orig = set(registry.keys())
+
     def _get_coord_funcs(axi, _ptype):
         def _particle_velocity(field, data):
             return data[_ptype, vel_name][:,axi]
@@ -132,9 +138,12 @@
         registry.add_field((ptype, "particle_position_%s" % ax),
             particle_type = True, function = p)
 
+    return list(set(registry.keys()).difference(orig))
+
 def particle_vector_functions(ptype, coord_names, vel_names, registry):
 
     # This will column_stack a set of scalars to create vector fields.
+    orig = set(registry.keys())
 
     def _get_vec_func(_ptype, names):
         def particle_vectors(field, data):
@@ -147,3 +156,4 @@
                        function=_get_vec_func(ptype, vel_names),
                        particle_type=True)
 
+    return list(set(registry.keys()).difference(orig))

diff -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 -r 75aa65ad91ac63409fb9cea0bc0c1af8f2c24d94 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -59,9 +59,18 @@
                 fd = dobj.field_data
         for f, tr in fd.items():
             if f[0] != self.filtered_type: continue
-            if tr.shape != filter.shape:
+            if tr.shape != filter.shape and tr.shape[0] != filter.shape[0]:
                 raise YTIllDefinedFilter(self, tr.shape, filter.shape)
-            dobj.field_data[self.name, f[1]] = tr[filter]
+            elif filter.size == 0:
+                # Filtering empty set.  This keeps our dimensions correct.
+                # Otherwise we end up with out-of-axis and shape problems.
+                d = tr.copy() 
+            elif len(tr.shape) > len(filter.shape):
+                # Filter must always be 1D
+                d = tr[filter,:]
+            else:
+                d = tr[filter]
+            dobj.field_data[self.name, f[1]] = d
 
     def available(self, field_list):
         # Note that this assumes that all the fields in field_list have the

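A standalone NumPy sketch (not yt code) of the shape cases the relaxed check above is meant to accommodate: a 1D boolean filter over N particles applied to both scalar (N,) and vector (N, 3) fields.

    import numpy as np

    filt = np.array([True, False, True])
    scalar = np.arange(3, dtype="float64")                 # shape (3,)
    vector = np.arange(9, dtype="float64").reshape(3, 3)   # shape (3, 3)

    assert scalar[filt].shape == (2,)        # 1D field: plain boolean indexing
    assert vector[filt, :].shape == (2, 3)   # 2D field: filter rows only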

https://bitbucket.org/yt_analysis/yt-3.0/commits/fa53eed1f628/
Changeset:   fa53eed1f628
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 23:26:34
Summary:     Ensuring that filtered types don't get concatenated.
Affected #:  1 file

diff -r 75aa65ad91ac63409fb9cea0bc0c1af8f2c24d94 -r fa53eed1f6280805034803765678eff2a9defd2d yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -72,7 +72,9 @@
     def _AllFields(field, data):
         v = []
         for ptype in data.pf.particle_types:
-            if ptype == "all": continue
+            if ptype == "all" or \
+                ptype in data.pf.known_filters:
+                  continue
             v.append(data[ptype, fname].copy())
         rv = np.concatenate(v, axis=0)
         return rv
@@ -82,7 +84,9 @@
     def _AllFields(field, data):
         v = []
         for ptype in data.pf.particle_types:
-            if ptype == "all": continue
+            if ptype == "all" or \
+                ptype in data.pf.known_filters:
+                  continue
             v.append(data[ptype, fname][:,axi])
         rv = np.concatenate(v, axis=0)
         return rv

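The reasoning behind the exclusion, as a standalone sketch (not yt code): filtered types are subsets of particles already present in their source type, so including them in the "all" union would double-count those particles.

    particle_types = ("io", "stars")     # suppose "stars" filters "io"
    known_filters = {"stars": None}      # stand-in for pf.known_filters
    kept = [pt for pt in particle_types
            if pt != "all" and pt not in known_filters]
    assert kept == ["io"]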

https://bitbucket.org/yt_analysis/yt-3.0/commits/a035ee4a2397/
Changeset:   a035ee4a2397
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 00:12:43
Summary:     Refactoring and starting to setup generic particle field addition.

This will also be the start of Chris's idea for generic particle fields, as we
can start having registries of particle types, etc etc.
Affected #:  5 files

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -37,6 +37,8 @@
     output_type_registry
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
+from yt.data_objects.particle_filters import \
+    filter_registry
 from yt.utilities.minimal_representation import \
     MinimalStaticOutput
 
@@ -60,6 +62,8 @@
     geometry = "cartesian"
     coordinates = None
     max_level = 99
+    _particle_mass_name = None
+    _particle_coordinates_name = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -251,6 +255,16 @@
             raise YTGeometryNotSupported(self.geometry)
 
     def add_particle_filter(self, filter):
+        if isinstance(filter, types.StringTypes):
+            used = False
+            for f in filter_registry[filter]:
+                used = self.h._setup_filtered_type(f)
+                if used: break
+            if not used: return
+            filter = f
+        else:
+            used = self.h._setup_filtered_type(filter)
+        if not used: return
         self.known_filters[filter.name] = filter
 
     _last_freq = (None, None)

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -706,6 +706,9 @@
     _hierarchy_class = EnzoHierarchy
     _fieldinfo_fallback = EnzoFieldInfo
     _fieldinfo_known = KnownEnzoFields
+    _particle_mass_name = "ParticleMass"
+    _particle_coordinates_name = "Coordinates"
+
     def __init__(self, filename, data_style=None,
                  file_style = None,
                  parameter_override = None,

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -357,6 +357,8 @@
     _hierarchy_class = RAMSESGeometryHandler
     _fieldinfo_fallback = RAMSESFieldInfo
     _fieldinfo_known = KnownRAMSESFields
+    _particle_mass_name = "ParticleMass"
+    _particle_coordinates_name = "Coordinates"
     
     def __init__(self, filename, data_style='ramses',
                  fields = None,

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -129,6 +129,8 @@
     _file_class = GadgetBinaryFile
     _fieldinfo_fallback = GadgetFieldInfo
     _fieldinfo_known = KnownGadgetFields
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
     _header_spec = (('Npart', 6, 'i'),
                     ('Massarr', 6, 'd'),
                     ('Time', 1, 'd'),

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -39,6 +39,8 @@
     data_object_registry
 from yt.data_objects.field_info_container import \
     NullFunc
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions
 from yt.utilities.io_handler import io_registry
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -166,28 +168,36 @@
         self.filtered_particle_types = []
         fc, fac = self._derived_fields_to_check()
         self._derived_fields_add(fc, fac)
-        # Now we do a special case for all filters.
-        kf = self.parameter_file.known_filters
+
+    def _setup_filtered_type(self, filter):
+        if not filter.available(self.derived_field_list):
+            return False
         fi = self.parameter_file.field_info
         fd = self.parameter_file.field_dependencies
-        for filter_name in kf:
-            filter = kf[filter_name]
-            if not filter.available(self.derived_field_list):
-                continue
-            # Only fields whose dependencies have been reached get added here.
-            available = False
-            for fn in self.derived_field_list:
-                if fn[0] == filter.filtered_type:
-                    # Now we can add this
-                    available = True
-                    self.derived_field_list.append(
-                        (filter.name, fn[1]))
-                    fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
-                    # Now we append the dependencies
-                    fd[filter.name, fn[1]] = fd[fn]
-            if available:
-                self.parameter_file.particle_types += (filter_name,)
-                self.filtered_particle_types.append(filter_name)
+        available = False
+        for fn in self.derived_field_list:
+            if fn[0] == filter.filtered_type:
+                # Now we can add this
+                available = True
+                self.derived_field_list.append(
+                    (filter.name, fn[1]))
+                fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
+                # Now we append the dependencies
+                fd[filter.name, fn[1]] = fd[fn]
+        if available:
+            self.parameter_file.particle_types += (filter.name,)
+            self.filtered_particle_types.append(filter.name)
+            self._setup_particle_fields(filter.name, True)
+        return available
+
+    def _setup_particle_fields(self, ptype, filtered = False):
+        pf = self.parameter_file
+        pmass = self.parameter_file._particle_mass_name
+        pcoord = self.parameter_file._particle_coordinates_name
+        if pmass is None or pcoord is None: return
+        df = particle_deposition_functions(ptype,
+            pcoord, pmass, self.parameter_file.field_info)
+        self._derived_fields_add(df)
 
     def _derived_fields_to_check(self):
         fi = self.parameter_file.field_info

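A hedged sketch of the intended flow after this refactoring (filter and field names illustrative): decorator-defined filters live in filter_registry, so they can now be attached to a loaded dataset by name, and deposition fields for the new type are generated when the frontend declares its particle mass and coordinate field names.

    from yt.data_objects.particle_filters import particle_filter

    @particle_filter("fast", requires=["particle_velocity_x"],
                     filtered_type="all")
    def _fast(pfilter, data):
        # Illustrative criterion over particles of the filtered type.
        return data[pfilter.filtered_type, "particle_velocity_x"] > 0.0

    # pf: a previously loaded parameter file.  The string form looks the
    # filter up in filter_registry and sets up its derived/deposit fields.
    pf.add_particle_filter("fast")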

https://bitbucket.org/yt_analysis/yt-3.0/commits/856bb17297ca/
Changeset:   856bb17297ca
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 00:22:16
Summary:     Simplify filter identification and add particle field names to Tipsy & OWLS
Affected #:  2 files

diff -r a035ee4a2397a086950061ed55a9918093bec4ec -r 856bb17297ca44f470396878fcfb85b18d3c4c81 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -259,13 +259,15 @@
             used = False
             for f in filter_registry[filter]:
                 used = self.h._setup_filtered_type(f)
-                if used: break
-            if not used: return
-            filter = f
+                if used:
+                    filter = f
+                    break
         else:
             used = self.h._setup_filtered_type(filter)
-        if not used: return
+        if not used:
+            return False
         self.known_filters[filter.name] = filter
+        return True
 
     _last_freq = (None, None)
     _last_finfo = None

diff -r a035ee4a2397a086950061ed55a9918093bec4ec -r 856bb17297ca44f470396878fcfb85b18d3c4c81 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -260,6 +260,8 @@
     _file_class = ParticleFile
     _fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
     _fieldinfo_known = KnownOWLSFields
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
     _header_spec = None # Override so that there's no confusion
 
     def __init__(self, filename, data_style="OWLS"):
@@ -339,6 +341,8 @@
     _file_class = TipsyFile
     _fieldinfo_fallback = TipsyFieldInfo
     _fieldinfo_known = KnownTipsyFields
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
     _header_spec = (('time',    'd'),
                     ('nbodies', 'i'),
                     ('ndim',    'i'),

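Since pf.add_particle_filter() now reports whether the filter could actually be set up, callers can check the return value (a hypothetical sketch):

    # False when the required fields are unavailable for the filtered type;
    # True once the filter is registered and its fields are created.
    if not pf.add_particle_filter("fast"):
        print("filter 'fast' could not be set up; required fields missing")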

https://bitbucket.org/yt_analysis/yt-3.0/commits/8482acd3f0f0/
Changeset:   8482acd3f0f0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 16:04:25
Summary:     Cache grid mask count.
Affected #:  1 file

diff -r 856bb17297ca44f470396878fcfb85b18d3c4c81 -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -67,6 +67,7 @@
         self._child_mask = self._child_indices = self._child_index_mask = None
         self.start_index = None
         self._last_mask = None
+        self._last_count = -1
         self._last_selector_id = None
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
@@ -447,14 +448,14 @@
     def select_icoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='int64')
-        coords = convert_mask_to_indices(mask, mask.sum())
+        coords = convert_mask_to_indices(mask, self._last_count)
         coords += self.get_global_startindex()[None, :]
         return coords
 
     def select_fcoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='float64')
-        coords = convert_mask_to_indices(mask, mask.sum()).astype("float64")
+        coords = convert_mask_to_indices(mask, self._last_count).astype("float64")
         coords += 0.5
         coords *= self.dds[None, :]
         coords += self.LeftEdge[None, :]
@@ -471,7 +472,7 @@
     def select_ires(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty(0, dtype='int64')
-        coords = np.empty(mask.sum(), dtype='int64')
+        coords = np.empty(self._last_count, dtype='int64')
         coords[:] = self.Level
         return coords
 
@@ -496,6 +497,10 @@
         else:
             self._last_mask = mask = selector.fill_mask(self)
             self._last_selector_id = id(selector)
+            if mask is None:
+                self._last_count = 0
+            else:
+                self._last_count = mask.sum()
         return mask
 
     def select(self, selector, source, dest, offset):
@@ -508,7 +513,7 @@
     def count(self, selector):
         mask = self._get_selector_mask(selector)
         if mask is None: return 0
-        return mask.sum()
+        return self._last_count
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results

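A standalone sketch (not yt code) of the memoization pattern introduced here: the selector mask and its element count are cached per selector id, so the repeated mask.sum() calls in the select_* methods reduce to a single computation.

    class MaskCache(object):
        """Cache a selector's boolean mask and its count, keyed by id."""
        def __init__(self):
            self._last_mask = None
            self._last_count = -1
            self._last_selector_id = None

        def get_mask(self, selector, fill_mask):
            # fill_mask(selector) stands in for selector.fill_mask(grid).
            if id(selector) != self._last_selector_id:
                mask = fill_mask(selector)
                self._last_mask = mask
                self._last_selector_id = id(selector)
                self._last_count = 0 if mask is None else int(mask.sum())
            return self._last_mask

        def count(self, selector, fill_mask):
            mask = self.get_mask(selector, fill_mask)
            return 0 if mask is None else self._last_count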

https://bitbucket.org/yt_analysis/yt-3.0/commits/bd8062a43ab5/
Changeset:   bd8062a43ab5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-08 15:24:03
Summary:     Merging with tip
Affected #:  10 files

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -617,14 +617,15 @@
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
         self.fields = [k for k in self.field_data.keys()
-                       if k not in self._container_fields]
+                       if k not in self._key_fields]
         from yt.visualization.plot_window import \
             GetWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, fields=list(self.fields), origin=origin,
+                         frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -490,14 +490,21 @@
         """
         normal = self.normal
         center = self.center
+        self.fields = [k for k in self.field_data.keys()
+                       if k not in self._key_fields]
         from yt.visualization.plot_window import \
             GetObliqueWindowParameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, self.pf)
+        from yt.visualization.fixed_resolution import \
+            ObliqueFixedResolutionBuffer
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_units = units
-        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
-                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw = PWViewerMPL(
+            self, bounds, fields=self.fields, origin='center-window',
+            periodic=False, oblique=True,
+            frb_generator=ObliqueFixedResolutionBuffer,
+            plot_type='OffAxisSlice')
         pw.set_axes_unit(axes_unit)
         return pw
 

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -62,6 +62,7 @@
     geometry = "cartesian"
     coordinates = None
     max_level = 99
+    storage_filename = None
     _particle_mass_name = None
     _particle_coordinates_name = None
 

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -212,14 +212,6 @@
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
 
-    def _chunk_io(self, dobj, cache = True):
-        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        # We'll take the max of 128 and the number of processors
-        nl = max(16, ytcfg.getint("yt", "__topcomm_parallel_size"))
-        for gs in list_chunks(gobjs, nl):
-            yield YTDataChunk(dobj, "io", gs, self._count_selection,
-                              cache = cache)
-
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
     _fieldinfo_fallback = FLASHFieldInfo

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -92,6 +92,6 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    data = ds[g.id - g._id_offset,:,:,:].transpose()[mask]
+                    data = ds[g.id - g._id_offset,:,:,:].transpose()
                     ind += g.select(selector, data, rv[field], ind) # caches
         return rv

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -34,6 +34,8 @@
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
+from yt.geometry.particle_geometry_handler import \
+    ParticleGeometryHandler
 from yt.data_objects.static_output import \
     StaticOutput
 from yt.utilities.logger import ytLogger as mylog
@@ -47,6 +49,8 @@
     mpc_conversion, sec_conversion
 from yt.utilities.flagging_methods import \
     FlaggingGrid
+from yt.frontends.sph.data_structures import \
+    ParticleFile
 
 from .fields import \
     StreamFieldInfo, \
@@ -704,3 +708,122 @@
         assign_particle_data(pf, pdata)
     
     return pf
+
+class StreamParticleGeometryHandler(ParticleGeometryHandler):
+
+    
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamParticleGeometryHandler, self).__init__(pf, data_style)
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+class StreamParticleFile(ParticleFile):
+    pass
+
+class StreamParticlesStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamParticleGeometryHandler
+    _file_class = StreamParticleFile
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_particles"
+    file_count = 1
+    filename_template = "stream_file"
+
+def load_particles(data, sim_unit_to_cm, bbox=None,
+                      sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a set of particles into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
+
+    This should allow a collection of particle data to be loaded directly into
+    yt and analyzed as would any others.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+
+    This will initialize an Octree of data.  Note that fluid fields will not
+    work yet, or possibly ever.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        Particles positions must be named "particle_position_x",
+        "particle_position_y", "particle_position_z".
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    Examples
+    --------
+
+    >>> pos = [np.random.random(128*128*128) for i in range(3)]
+    >>> data = dict(particle_position_x = pos[0],
+    ...             particle_position_y = pos[1],
+    ...             particle_position_z = pos[2])
+    >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]])
+    >>> pf = load_particles(data, 3.08e24, bbox=bbox)
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'stream_file':data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "ParticleData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamParticlesStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,9 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
 
 KnownStreamFields = FieldInfoContainer()
 add_stream_field = KnownStreamFields.add_field
@@ -69,3 +72,9 @@
 
 add_field(("all", "ParticleMass"), function = TranslationFunc("particle_mass"),
           particle_type=True)
+
+particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
+                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
+                          StreamFieldInfo)
+particle_deposition_functions("all", "Coordinates", "ParticleMass",
+                               StreamFieldInfo)

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -32,6 +32,8 @@
 from yt.utilities.io_handler import \
     BaseIOHandler, _axis_ids
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.lib.geometry_utils import compute_morton
+from yt.utilities.exceptions import *
 
 class IOHandlerStream(BaseIOHandler):
 
@@ -127,3 +129,83 @@
     def _read_exception(self):
         return KeyError
 
+class StreamParticleIOHandler(BaseIOHandler):
+
+    _data_style = "stream_particles"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        # We first need a set of masks for each particle type
+        ptf = defaultdict(list)
+        psize = defaultdict(lambda: 0)
+        chunks = list(chunks)
+        for ftype, fname in fields:
+            ptf[ftype].append(fname)
+        # For this type of file, we actually have something slightly different.
+        # We are given a list of ParticleDataChunks, which is composed of
+        # individual ParticleOctreeSubsets.  The data_files attribute on these
+        # may in fact overlap.  So we will iterate over a union of all the
+        # data_files.
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            f = self.fields[data_file.filename]
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                assert(ptype == "all")
+                psize[ptype] += selector.count_points(
+                        f["particle_position_x"],
+                        f["particle_position_y"],
+                        f["particle_position_z"])
+        # Now we have all the sizes, and we can allocate
+        ind = {}
+        for field in fields:
+            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
+            rv[field] = np.empty(psize[field[0]], dtype="float64")
+            ind[field] = 0
+        for data_file in data_files:
+            f = self.fields[data_file.filename]
+            for ptype, field_list in sorted(ptf.items()):
+                assert(ptype == "all")
+                mask = selector.select_points(
+                        f["particle_position_x"],
+                        f["particle_position_y"],
+                        f["particle_position_z"])
+                if mask is None: continue
+                for field in field_list:
+                    data = f[field][mask,...]
+                    my_ind = ind[ptype, field]
+                    mylog.debug("Filling from %s to %s with %s",
+                        my_ind, my_ind+data.shape[0], field)
+                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
+                    ind[ptype, field] += data.shape[0]
+        return rv
+
+    def _initialize_index(self, data_file, regions):
+        # self.fields[g.id][fname] is the pattern here
+        pos = np.column_stack(self.fields[data_file.filename][
+                              "particle_position_%s" % ax] for ax in 'xyz')
+        if np.any(pos.min(axis=0) <= data_file.pf.domain_left_edge) or \
+           np.any(pos.max(axis=0) >= data_file.pf.domain_right_edge):
+            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                                   data_file.pf.domain_left_edge,
+                                   data_file.pf.domain_right_edge)
+        regions.add_data_file(pos, data_file.file_id)
+        morton = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+        return morton
+
+    def _count_particles(self, data_file):
+        npart = self.fields[data_file.filename]["particle_position_x"].size
+        return {'all': npart}
+
+    def _identify_fields(self, data_file):
+        return [ ("all", k) for k in self.fields[data_file.filename].keys()]

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -271,15 +271,15 @@
         # per cell, M_k, and Q_k and also the number of particles
         # deposited into each one
         # the M_k term
-        self.omk= np.zeros(self.nvals, dtype="float64")
+        self.omk= np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray omkarr= self.omk
         self.mk= <np.float64_t*> omkarr.data
         # the Q_k term
-        self.oqk= np.zeros(self.nvals, dtype="float64")
+        self.oqk= np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray oqkarr= self.oqk
         self.qk= <np.float64_t*> oqkarr.data
         # particle count
-        self.oi = np.zeros(self.nvals, dtype="float64")
+        self.oi = np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray oiarr = self.oi
         self.i = <np.float64_t*> oiarr.data
 
@@ -368,11 +368,11 @@
     cdef np.float64_t *w
     cdef public object ow
     def initialize(self):
-        self.owf = np.zeros(self.nvals, dtype='float64')
+        self.owf = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray wfarr = self.owf
         self.wf = <np.float64_t*> wfarr.data
         
-        self.ow = np.zeros(self.nvals, dtype='float64')
+        self.ow = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray warr = self.ow
         self.w = <np.float64_t*> warr.data
     

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -55,7 +55,7 @@
 from yt.config import ytcfg
 from yt.funcs import \
     mylog, defaultdict, iterable, ensure_list, \
-    fix_axis, get_image_suffix
+    ensure_tuple, fix_axis, get_image_suffix
 from yt.utilities.lib import write_png_to_string
 from yt.utilities.definitions import \
     x_dict, x_names, \
@@ -75,7 +75,8 @@
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+    if version.LooseVersion(matplotlib.__version__) < \
+        version.LooseVersion("1.2.0"):
         from matplotlib.pyparsing import ParseFatalException
     else:
         from matplotlib.pyparsing_py2 import ParseFatalException
@@ -197,7 +198,8 @@
         width = validate_iterable_width(width)
     else:
         try:
-            assert isinstance(width, Number), "width (%s) is invalid" % str(width)
+            assert isinstance(width, Number), \
+              "width (%s) is invalid" % str(width)
         except AssertionError, e:
             raise YTInvalidWidthError(e)
         width = ((width, '1'), (width, '1'))
@@ -208,7 +210,8 @@
             assert_valid_width_tuple(depth)
         else:
             try:
-                assert isinstance(depth, Number), "width (%s) is invalid" % str(depth)
+                assert isinstance(depth, Number), \
+                  "width (%s) is invalid" % str(depth)
             except AssertionError, e:
                 raise YTInvalidWidthError(e)
             depth = ((depth, '1'),)
@@ -270,7 +273,8 @@
 
     Parameters
     ----------
-    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or :class:`yt.data_objects.data_containers.AMRSliceBase`
+    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or
+                  :class:`yt.data_objects.data_containers.AMRSliceBase`
         This is the source to be pixelized, which can be a projection or a
         slice.  (For cutting planes, see
         `yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
@@ -294,7 +298,8 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
-                 periodic=True, origin='center-window', oblique=False, window_size=10.0):
+                 periodic=True, origin='center-window', oblique=False,
+                 window_size=10.0):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)
@@ -311,7 +316,8 @@
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in range(len(self.data_source.center))
+            center = [self.data_source.center[i] for i in
+                      range(len(self.data_source.center))
                       if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
@@ -461,9 +467,10 @@
 
         parameters
         ----------
-        width : float, array of floats, (float, unit) tuple, or tuple of (float, unit) tuples.
-             Width can have four different formats to support windows with variable
-             x and y widths.  They are:
+        width : float, array of floats, (float, unit) tuple, or tuple of
+                (float, unit) tuples.
+             Width can have four different formats to support windows with
+             variable x and y widths.  They are:
 
              ==================================     =======================
              format                                 example
@@ -474,13 +481,14 @@
              (float, float)                         (0.2, 0.3)
              ==================================     =======================
 
-             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-             the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-             in code units.  If units are provided the resulting plot axis labels will
-             use the supplied units.
+             For example, (10, 'kpc') requests a plot window that is 10
+             kiloparsecs wide in the x and y directions,
+             ((10,'kpc'),(15,'kpc')) requests a window that is 10 kiloparsecs
+             wide along the x axis and 15 kiloparsecs wide along the y axis.
+             In the other two examples, code units are assumed, for example
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y
+             width of 0.3 in code units.  If units are provided the resulting
+             plot axis labels will use the supplied units.
         unit : str
              the unit the width has been specified in. If width is a tuple, this
              argument is ignored. Defaults to code units.
@@ -734,9 +742,9 @@
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the
-            units, and only show the axes name. If unit_name is a tuple, the first
-            element is assumed to be the unit for the x axis and the second element
-            the unit for the y axis.
+            units, and only show the axes name. If unit_name is a tuple, the
+            first element is assumed to be the unit for the x axis and the
+            second element the unit for the y axis.
 
         Raises
         ------
@@ -784,6 +792,7 @@
             self._frb_generator = kwargs.pop("frb_generator")
         if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
+        self.plot_fields = ensure_list(kwargs.pop("fields"))
         font_size = kwargs.pop("fontsize", 18)
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=font_size, fname=font_path)
@@ -816,8 +825,9 @@
             return 0.0, 0.0
         else:
             mylog.warn("origin = {0}".format(origin))
-            msg = ('origin keyword "{0}" not recognized, must declare "domain" '
-                   'or "center" as the last term in origin.').format(self.origin)
+            msg = \
+              ('origin keyword "{0}" not recognized, must declare "domain" '
+               'or "center" as the last term in origin.').format(self.origin)
             raise RuntimeError(msg)
 
         if origin[0] == 'lower':
@@ -854,13 +864,14 @@
         else:
             fields = self._frb.keys()
         self._colorbar_valid = True
-        for f in self.fields:
+        for f in self.data_source._determine_fields(self.plot_fields):
             axis_index = self.data_source.axis
 
             xc, yc = self._setup_origin()
 
             if self._axes_unit_names is None:
-                unit = get_smallest_appropriate_unit(self.xlim[1] - self.xlim[0], self.pf)
+                unit = get_smallest_appropriate_unit(
+                    self.xlim[1] - self.xlim[0], self.pf)
                 (unit_x, unit_y) = (unit, unit)
             else:
                 (unit_x, unit_y) = self._axes_unit_names
@@ -875,22 +886,27 @@
             else:
                 zlim = (None, None)
 
-            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            plot_aspect = \
+              (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
 
-            # This sets the size of the figure, and defaults to making one of the dimensions smaller.
-            # This should protect against giant images in the case of a very large aspect ratio.
+            # This sets the size of the figure, and defaults to making one of
+            # the dimensions smaller.  This should protect against giant images
+            # in the case of a very large aspect ratio.
             cbar_frac = 0.0
             if plot_aspect > 1.0:
-                size = (self.window_size*(1.+cbar_frac), self.window_size/plot_aspect)
+                size = (self.window_size*(1.+cbar_frac),
+                        self.window_size/plot_aspect)
             else:
-                size = (plot_aspect*self.window_size*(1.+cbar_frac), self.window_size)
+                size = (plot_aspect*self.window_size*(1.+cbar_frac),
+                        self.window_size)
 
             # Correct the aspect ratio in case unit_x and unit_y are different
             aspect = self.pf[unit_x]/self.pf[unit_y]
 
             image = self._frb[f]
 
-            if image.max() == image.min() and self._field_transform[f] == log_transform:
+            if image.max() == image.min():
+              if self._field_transform[f] == log_transform:
                 mylog.warning("Plot image for field %s has zero dynamic " \
                               "range. Min = Max = %d." % \
                               (f, image.max()))
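
The sizing rule rewrapped in the hunk above can be read on its own: the longer
dimension of the plot window is pinned to the window size and the shorter one
is scaled down by the aspect ratio. A minimal standalone sketch of that rule;
the function name and the default window_size of 8.0 are illustrative, not yt
API:

    def figure_size(xlim, ylim, window_size=8.0, cbar_frac=0.0):
        """Return (width, height) with the longer side fixed at window_size.

        Shrinking the shorter dimension, rather than growing the longer one,
        protects against giant images when the aspect ratio is extreme.
        """
        plot_aspect = (xlim[1] - xlim[0]) / (ylim[1] - ylim[0])
        if plot_aspect > 1.0:
            return (window_size * (1.0 + cbar_frac), window_size / plot_aspect)
        return (plot_aspect * window_size * (1.0 + cbar_frac), window_size)

    # A window 10 units wide and 2 units tall keeps its width and shrinks
    # its height: (8.0, 1.6).
    print(figure_size((0.0, 10.0), (0.0, 2.0)))
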
@@ -978,8 +994,8 @@
         :py:class:`matplotlib.font_manager.FontProperties`.
 
         Possible keys include
-        * family - The font family. Can be serif, sans-serif, cursive, 'fantasy' or
-          'monospace'.
+        * family - The font family. Can be serif, sans-serif, cursive,
+          fantasy, monospace, or a specific font name.
         * style - The font style. Either normal, italic or oblique.
         * color - A valid color string like 'r', 'g', 'red', 'cobalt', and
           'orange'.
@@ -1128,8 +1144,8 @@
             raise YTNotInsideNotebook
 
     def display(self, name=None, mpl_kwargs=None):
-        """Will attempt to show the plot in in an IPython notebook.  Failing that, the 
-        plot will be saved to disk."""
+        """Will attempt to show the plot in in an IPython notebook.  Failing
+        that, the plot will be saved to disk."""
         try:
             return self.show()
         except YTNotInsideNotebook:
@@ -1155,7 +1171,8 @@
          or the axis name itself
     fields : string
          The name of the field(s) to be plotted.
-    center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
+    center : two or three-element sequence of floats, 'c', 'center',
+             or 'max'
          The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
@@ -1175,12 +1192,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions; ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     axes_unit : A string
          The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
@@ -1191,14 +1208,15 @@
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
          The second index is the x-location, given as 'left', 'right', or
-         'center'.  Finally, the whether the origin is applied in 'domain' space,
-         plot 'window' space or 'native' simulation coordinate system is given.
-         For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
-         both place the origin in the upper right hand corner of domain space.
-         If x or y are not given, a value is inffered.  For instance, 'left-domain'
-         corresponds to the lower-left hand corner of the simulation domain,
-         'center-domain' corresponds to the center of the simulation domain,
-         or 'center-window' for the center of the plot window. Further examples:
+         'center'.  Finally, whether the origin is applied in 'domain'
+         space, plot 'window' space, or the 'native' simulation coordinate
+         system is given. For example, 'upper-right-domain' and ['upper',
+         'right', 'domain'] both place the origin in the upper right hand
+         corner of domain space. If x or y are not given, a value is inferred.
+         For instance, 'left-domain' corresponds to the lower-left hand corner
+         of the simulation domain, 'center-domain' corresponds to the center
+         of the simulation domain, and 'center-window' to the center of the
+         plot window. Further examples:
 
          ==================================     ============================
          format                                 example
@@ -1215,7 +1233,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
 
     Examples
     --------
@@ -1244,7 +1263,7 @@
         slc = pf.h.slice(axis, center[axis],
             field_parameters = field_parameters, center=center)
         slc.get_data(fields)
-        PWViewerMPL.__init__(self, slc, bounds, origin=origin,
+        PWViewerMPL.__init__(self, slc, bounds, fields=fields, origin=origin,
                              fontsize=fontsize)
         self.set_axes_unit(axes_unit)
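
The width, origin, and axes_unit keywords documented above combine as in the
following usage sketch. It assumes the constructor rewrapped here belongs to
yt's SlicePlot class (the class name is not visible in this hunk) and uses a
placeholder dataset path:

    from yt.mods import SlicePlot, load

    pf = load("DD0010/moving7_0010")   # placeholder dataset path
    # A 10 kpc x 15 kpc window, origin pinned to the upper-right corner of
    # the domain, with tick labels in kpc.
    p = SlicePlot(pf, 'z', 'Density',
                  width=((10, 'kpc'), (15, 'kpc')),
                  origin='upper-right-domain',
                  axes_unit='kpc')
    p.save()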
 
@@ -1268,7 +1287,8 @@
          or the axis name itself
     fields : string
         The name of the field(s) to be plotted.
-    center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
+    center : two or three-element sequence of floats, 'c', 'center',
+             or 'max'
          The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
@@ -1288,12 +1308,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions; ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     axes_unit : A string
          The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
@@ -1304,14 +1324,15 @@
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
          The second index is the x-location, given as 'left', 'right', or
-         'center'.  Finally, the whether the origin is applied in 'domain' space,
-         plot 'window' space or 'native' simulation coordinate system is given.
-         For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
-         both place the origin in the upper right hand corner of domain space.
-         If x or y are not given, a value is inffered.  For instance, 'left-domain'
-         corresponds to the lower-left hand corner of the simulation domain,
-         'center-domain' corresponds to the center of the simulation domain,
-         or 'center-window' for the center of the plot window. Further examples:
+         'center'.  Finally, whether the origin is applied in 'domain'
+         space, plot 'window' space, or the 'native' simulation coordinate
+         system is given. For example, 'upper-right-domain' and ['upper',
+         'right', 'domain'] both place the origin in the upper right hand
+         corner of domain space. If x or y are not given, a value is inferred.
+         For instance, 'left-domain' corresponds to the lower-left hand corner
+         of the simulation domain, 'center-domain' corresponds to the center
+         of the simulation domain, and 'center-window' to the center of the
+         plot window. Further examples:
 
          ==================================     ============================
          format                                 example
@@ -1327,8 +1348,8 @@
          ==================================     ============================
 
     data_source : AMR3DData Object
-         Object to be used for data selection.  Defaults to a region covering the
-         entire simulation.
+         Object to be used for data selection.  Defaults to a region covering
+         the entire simulation.
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
     max_level: int
@@ -1336,7 +1357,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
 
     Examples
     --------
@@ -1352,8 +1374,8 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=18, 
-                 field_parameters=None, data_source=None):
+                 weight_field=None, max_level=None, origin='center-window',
+                 fontsize=18, field_parameters=None, data_source=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
@@ -1363,8 +1385,9 @@
             axes_unit = units
         if field_parameters is None: field_parameters = {}
         proj = pf.h.proj(fields, axis, weight_field=weight_field,
-                         center=center, data_source=data_source, field_parameters = field_parameters)
-        PWViewerMPL.__init__(self,proj,bounds,origin=origin,
+                         center=center, data_source=data_source,
+                         field_parameters = field_parameters)
+        PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
                              fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
@@ -1408,7 +1431,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
     """
 
     _plot_type = 'OffAxisSlice'
@@ -1417,7 +1441,8 @@
     def __init__(self, pf, normal, fields, center='c', width=None,
                  axes_unit=None, north_vector=None, fontsize=18,
                  field_parameters=None):
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal,center,width,pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
         if field_parameters is None: field_parameters = {}
@@ -1426,8 +1451,9 @@
         cutting.get_data(fields)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True,
-                             fontsize=fontsize)
+        PWViewerMPL.__init__(self, cutting, bounds, fields=fields,
+                             origin='center-window',periodic=False,
+                             oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 class OffAxisProjectionDummyDataSource(object):
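
The off-axis slice constructor above takes a normal vector instead of an axis
name and hard-codes origin='center-window'. A usage sketch, assuming the class
is yt's OffAxisSlicePlot (consistent with the 'OffAxisSlice' plot type) and a
placeholder dataset path:

    from yt.mods import OffAxisSlicePlot, load

    pf = load("DD0010/moving7_0010")   # placeholder dataset path
    # `normal` defines the cutting plane; `north_vector` fixes which way is
    # "up" in the resulting image.
    p = OffAxisSlicePlot(pf, [1.0, 1.0, 0.0], 'Density',
                         width=(20.0, 'kpc'),
                         north_vector=[0.0, 0.0, 1.0])
    p.save()
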
@@ -1496,12 +1522,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions; ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     depth : A tuple or a float
        A tuple containing the depth to project through and the string
         key of the unit: (width, 'unit').  If set to a float, code units
@@ -1530,18 +1556,23 @@
                  depth=(1, '1'), axes_unit=None, weight_field=None,
                  max_level=None, north_vector=None, volume=None, no_ghost=False,
                  le=None, re=None, interpolated=False, fontsize=18):
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
         if axes_unit is None and units != ('1', '1', '1'):
             axes_unit = units[:2]
         fields = ensure_list(fields)[:]
-        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
-        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
-                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
-                                                       le=le, re=re, north_vector=north_vector)
+        width = np.array((bounds[1] - bounds[0],
+                          bounds[3] - bounds[2],
+                          bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(
+            center_rot, pf, normal, width, fields, interpolated,
+            weight=weight_field,  volume=volume, no_ghost=no_ghost,
+            le=le, re=re, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self, OffAxisProj, bounds, origin='center-window', periodic=False,
-                             oblique=True, fontsize=fontsize)
+        PWViewerMPL.__init__(
+            self, OffAxisProj, bounds, fields=fields, origin='center-window',
+            periodic=False, oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 _metadata_template = """
@@ -1784,13 +1815,15 @@
             self._field_transform[field] = linear_transform
 
 class WindowPlotMPL(ImagePlotMPL):
-    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size, fontsize):
+    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size,
+                 fontsize):
         fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
         if np.any(np.array(axrect) < 0):
-            mylog.warning('The axis ratio of the requested plot is very narrow.  '
-                          'There is a good chance the plot will not look very good, '
-                          'consider making the plot manually using FixedResolutionBuffer '
-                          'and matplotlib.')
+            msg = 'The axis ratio of the requested plot is very narrow. ' \
+                  'There is a good chance the plot will not look very good, ' \
+                  'consider making the plot manually using ' \
+                  'FixedResolutionBuffer and matplotlib.'
+            mylog.warn(msg)
             axrect  = (0.07, 0.10, 0.80, 0.80)
             caxrect = (0.87, 0.10, 0.04, 0.80)
         ImagePlotMPL.__init__(self, fsize, axrect, caxrect, zlim)
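
Beyond the line-wrapping cleanups, the functional change in this diff is that
each plot class now passes fields= into PWViewerMPL, which stores them via
ensure_list and resolves them with _determine_fields when the plots are built.
A stripped-down sketch of that plumbing; the class and stub names below are
placeholders, and this simplified ensure_list ignores cases (tuples, None)
that yt's real helper handles:

    def ensure_list(obj):
        # Simplified stand-in: normalize one field name or a list of names
        # into a list.
        return obj if isinstance(obj, list) else [obj]

    class StubDataSource(object):
        def _determine_fields(self, fields):
            # A real yt data source resolves bare names into
            # (field type, field name) tuples; this stub just tags
            # everything as 'gas' for illustration.
            return [('gas', f) for f in fields]

    class ViewerSketch(object):
        def __init__(self, data_source, fields):
            self.data_source = data_source
            # Keep exactly what the caller asked to plot, as a list.
            self.plot_fields = ensure_list(fields)

        def fields_to_plot(self):
            # Resolve names only when plots are built, mirroring the
            # _determine_fields call added to _setup_plots.
            return self.data_source._determine_fields(self.plot_fields)

    viewer = ViewerSketch(StubDataSource(), 'Density')
    print(viewer.fields_to_plot())   # [('gas', 'Density')]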

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


