[Yt-svn] yt-commit r1485 - in trunk: tests yt yt/data_objects yt/extensions yt/extensions/enzo_test yt/extensions/volume_rendering yt/lagos yt/raven

mturk at wrangler.dreamhost.com
Mon Oct 19 18:15:45 PDT 2009


Author: mturk
Date: Mon Oct 19 18:15:42 2009
New Revision: 1485
URL: http://yt.enzotools.org/changeset/1485

Log:
Big commit from the mercurial repository and the named branch 'yt'.

This includes the following pieces of code:

 * Time series objects, still to be documented but soon to be the basis of the
2.0 release of yt.
 * enzo_test, a regression test utility; it is currently unused and may be
deleted without warning if it goes nowhere.
 * A software volume renderer that accepts arbitrary transfer functions and
casts rays directly through the volume.
 * Initial tests of the dataspace reader for particle data.
 * Addition of a TransferShells function to RTIntegrator.




Added:
   trunk/yt/data_objects/
   trunk/yt/data_objects/__init__.py
   trunk/yt/data_objects/analyzer_objects.py
   trunk/yt/data_objects/time_series.py
   trunk/yt/extensions/enzo_test/
   trunk/yt/extensions/enzo_test/DataProviders.py
   trunk/yt/extensions/enzo_test/ProblemSetup.py
   trunk/yt/extensions/enzo_test/ProblemVerification.py
   trunk/yt/extensions/enzo_test/SimulationTests.py
   trunk/yt/extensions/enzo_test/__init__.py
   trunk/yt/extensions/volume_rendering/
   trunk/yt/extensions/volume_rendering/FixedInterpolator.c
   trunk/yt/extensions/volume_rendering/FixedInterpolator.h
   trunk/yt/extensions/volume_rendering/TransferFunction.py
   trunk/yt/extensions/volume_rendering/VolumeIntegrator.pyx
   trunk/yt/extensions/volume_rendering/__init__.py
   trunk/yt/extensions/volume_rendering/grid_partitioner.py
   trunk/yt/extensions/volume_rendering/setup.py
   trunk/yt/extensions/volume_rendering/software_sampler.py
Modified:
   trunk/tests/test_lagos.py
   trunk/yt/extensions/setup.py
   trunk/yt/funcs.py
   trunk/yt/lagos/BaseDataTypes.py
   trunk/yt/lagos/DataReadingFuncs.py
   trunk/yt/lagos/EnzoDefs.py
   trunk/yt/lagos/HDF5LightReader.c
   trunk/yt/lagos/OutputTypes.py
   trunk/yt/lagos/RTIntegrator.pyx
   trunk/yt/lagos/UniversalFields.py
   trunk/yt/lagos/setup.py
   trunk/yt/raven/Callbacks.py

Modified: trunk/tests/test_lagos.py
==============================================================================
--- trunk/tests/test_lagos.py	(original)
+++ trunk/tests/test_lagos.py	Mon Oct 19 18:15:42 2009
@@ -394,6 +394,7 @@
         cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
         self.assertEqual(na.unique(cg["CellVolume"]).size, 1)
 
+
 class TestDiskDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
     def setUp(self):
         DataTypeTestingBase.setUp(self)
@@ -479,6 +480,7 @@
         self.region = self.data
         self.ind_to_get = na.where(self.region["Temperature"]>500)
         self.data = self.region.extract_region(self.ind_to_get)
+
     def testNumberOfEntries(self):
         self.assertEqual(self.ind_to_get[0].shape,
                         self.data["Density"].shape)

Added: trunk/yt/data_objects/__init__.py
==============================================================================
--- (empty file)
+++ trunk/yt/data_objects/__init__.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,2 @@
+from time_series import *
+from analyzer_objects import *

Added: trunk/yt/data_objects/analyzer_objects.py
==============================================================================
--- (empty file)
+++ trunk/yt/data_objects/analyzer_objects.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,61 @@
+class AnalysisTask(object):
+
+    def __init__(self, *args, **kwargs):
+        # This should only get called if the subclassed object
+        # does not override
+        if len(args) + len(kwargs) != len(self._params):
+            raise RuntimeError("%s expects %s" % (self.__class__.__name__, self._params))
+        self.__dict__.update(zip(self._params, args))
+        self.__dict__.update(kwargs)
+
+    def __repr__(self):
+        # Stolen from AMRData.__repr__
+        s = "%s: " % (self.__class__.__name__, self._analysis_type)
+        s += ", ".join(["%s=%s" % (i, getattr(self,i))
+                       for i in self._params])
+        return s
+
+class MaximumValue(AnalysisTask):
+    _params = ['field']
+
+    def eval(self, data_object):
+        v = data_object.quantities["MaxLocation"](
+                self.field, lazy_reader=True)[0]
+        return v
+
+class ParameterValue(AnalysisTask):
+    _params = ['parameter']
+
+    def __init__(self, parameter, cast=None):
+        self.parameter = parameter
+        if cast is None:
+            cast = lambda a: a
+        self.cast = cast
+
+    def eval(self, pf):
+        return self.cast(pf.get_parameter(self.parameter))
+
+class CurrentTimeYears(AnalysisTask):
+    _params = []
+
+    def eval(self, pf):
+        return pf["InitialTime"] * pf["years"]
+
+class SliceDataset(AnalysisTask):
+    _params = ['field', 'axis']
+
+    def eval(self, pf):
+        pass
+
+class SlicePlotDataset(AnalysisTask):
+    _params = ['field', 'axis', 'center']
+
+    def __init__(self, *args, **kwargs):
+        import yt.raven
+        self.raven = yt.raven
+        AnalysisTask.__init__(self, *args, **kwargs)
+
+    def eval(self, pf):
+        pc = self.raven.PlotCollection(pf, center = self.center)
+        pc.add_slice(self.field, self.axis)
+        return pc.save()[0]

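As a quick illustration of the task API (not part of this commit; the dataset
name is hypothetical and "pf" is assumed to be an EnzoStaticOutput):

    from yt.mods import *
    from yt.data_objects import MaximumValue

    pf = EnzoStaticOutput("DD0010/DD0010")   # hypothetical output
    task = MaximumValue("Density")
    print task                               # MaximumValue: field=Density
    print task.eval(pf.h.all_data())
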
Added: trunk/yt/data_objects/time_series.py
==============================================================================
--- (empty file)
+++ trunk/yt/data_objects/time_series.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,65 @@
+from yt.lagos import *
+import inspect, functools, types, weakref
+
+class TimeSeriesData(object):
+    def __init__(self, name):
+        self.outputs = []
+
+    def __iter__(self):
+        # We can make this fancier, but this works
+        return self.outputs.__iter__()
+
+_enzo_header = "DATASET WRITTEN "
+
+class EnzoTimeSeries(TimeSeriesData):
+    def __init__(self, name, output_log = "OutputLog"):
+        TimeSeriesData.__init__(self, name)
+        for line in open(output_log):
+            if not line.startswith(_enzo_header): continue
+            fn = line[len(_enzo_header):].strip()
+            self._insert(EnzoStaticOutput(fn))
+
+        for type_name in data_object_registry:
+            setattr(self, type_name, functools.partial(
+                TimeSeriesDataObject, self, type_name))
+
+    def __getitem__(self, key):
+        if isinstance(key, types.SliceType):
+            if isinstance(key.start, types.FloatType):
+                return self.get_range(key.start, key.stop)
+        return self.outputs[key]
+
+    def _insert(self, pf):
+        # We get handed an instantiated parameter file
+        # Here we'll figure out a couple things about it, and then stick it
+        # inside our list.
+        self.outputs.append(pf)
+        
+    def eval(self, tasks, obj=None):
+        if obj is None: obj = TimeSeriesDataObject(self, "all_data")
+        tasks = ensure_list(tasks)
+        return_values = []
+        for pf in self:
+            return_values.append([])
+            for task in tasks:
+                style = inspect.getargspec(task.eval)[0][1]
+                if style == 'pf': arg = pf
+                elif style == 'data_object': arg = obj.get(pf)
+                return_values[-1].append(task.eval(arg))
+        return return_values
+
+class TimeSeriesDataObject(object):
+    def __init__(self, time_series, data_object_name, *args, **kwargs):
+        self.time_series = weakref.proxy(time_series)
+        self.data_object_name = data_object_name
+        self._args = args
+        self._kwargs = kwargs
+
+    def eval(self, tasks):
+        return self.time_series.eval(tasks, self)
+
+    def get(self, pf):
+        # We get the type name, which corresponds to an attribute of the
+        # hierarchy
+        cls = getattr(pf.h, self.data_object_name)
+        return cls(*self._args, **self._kwargs)

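A rough sketch of the intended time-series workflow (illustrative only; the
"sphere" attribute exists only if lagos registers a sphere type in
data_object_registry, and an Enzo OutputLog is assumed to be present):

    from yt.data_objects import *

    ts = EnzoTimeSeries("my_run", output_log="OutputLog")
    # Evaluate tasks against each output's all_data object ...
    print ts.eval([MaximumValue("Density")])
    # ... or against a data object constructed per-output.
    sp = ts.sphere([0.5, 0.5, 0.5], 0.1)
    print sp.eval([CurrentTimeYears(), MaximumValue("Density")])
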
Added: trunk/yt/extensions/enzo_test/DataProviders.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/enzo_test/DataProviders.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,69 @@
+"""
+This is a definition of a class for providing data to a simulation
+verification method
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2007-2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.mods import *
+
+class TestDataSource(object):
+    def provide(self, pf):
+        pass
+
+    def __call__(self, pf):
+        return self.provide(pf)
+
+class MaximumDensitySphere(TestDataSource):
+    def __init__(self, radius, unit):
+        self.radius = radius
+        self.unit = unit
+
+    def provide(self, pf):
+        v, c = pf.h.find_max("Density")
+        yield pf.h.sphere(c, self.radius/pf[self.unit])
+
+class HaloSpheres(TestDataSource):
+    def __init__(self, n_part):
+        self.n_part = n_part
+
+    def provide(self, pf):
+        halo_list = HaloFinder(pf)
+        for halo in halo_list:
+            if halo.get_size() < self.n_part: continue
+            yield halo.get_sphere()
+
+class Halos(TestDataSource):
+    def __init__(self):
+        pass
+
+    def provide(self, pf):
+        halo_list = HaloFinder(pf)
+        for halo in halo_list:
+            yield halo
+
+class EntireDataset(TestDataSource):
+    def __init__(self):
+        pass
+
+    def provide(self, pf):
+        yield pf.h.all_data()

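Each provider is a callable yielding data objects for a given parameter file;
for example (sketch only; "pf" is assumed to be an already-loaded output):

    source = MaximumDensitySphere(100.0, 'pc')
    for sphere in source(pf):
        print sphere.quantities["TotalQuantity"](
            "CellMassMsun", lazy_reader=True)
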
Added: trunk/yt/extensions/enzo_test/ProblemSetup.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/enzo_test/ProblemSetup.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,92 @@
+"""
+This is a definition of a class for defining problem verification.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2007-2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.mods import *
+from yt.funcs import *
+from yt.extensions.EnzoSimulation import *
+from SimulationTests import test_registry
+
+from mercurial import commands, hg, ui
+import cPickle
+
+class Problem(object):
+    def __init__(self, name, pfname, repo_path="."):
+        self.name = name
+        self.pfname = pfname
+        self.simulation = EnzoSimulation(pfname)
+        self.tests, self.test_types = [], {}
+        self.init_repo(repo_path)
+        self._initialize_tests()
+
+    def _add_existing_test(self, test_type):
+        @wraps(test_type.__init__)
+        def test_wrapper(name, *args, **kwargs):
+            new_test = test_type(name, *args, **kwargs)
+            self.tests.append(new_test)
+        return test_wrapper
+
+    def _initialize_tests(self):
+        for name, test in test_registry.iteritems():
+            setattr(self, 'add_%s' % name, self._add_existing_test(test))
+            self.test_types[name] = self._add_existing_test(test)
+
+    def init_repo(self, path):
+        self.ui = ui.ui()
+        try:
+            self.repo = hg.repository(self.ui, path)
+        except Exception:
+            print "CREATING:", path
+            self.repo = hg.repository(self.ui, path, create=True)
+
+    def load_repo_file(self, fn, identifier='tip'):
+        return self.repo[identifier][fn].data()
+
+    def run_tests(self):
+        all_results = {}
+        try:
+            for pf in self.simulation:
+                results = {}
+                for test in self.tests:
+                    results[test.name] = test(pf)
+                all_results[str(pf)] = results
+        except IOError:
+            pass
+        return all_results
+
+    def store_results(self, results):
+        base_fn = "results_%s.cpkl" % self.name
+        fn = self.repo.pathto(base_fn)
+        cPickle.dump(results, open(fn, "w"))
+        if base_fn not in self.repo['tip'].manifest():
+            commands.add(self.ui, self.repo, fn)
+        message = "Committing results from current run"
+        commands.commit(self.ui, self.repo, fn, message=message)
+        print "Committed"
+
+    def __call__(self):
+        results = self.run_tests()
+        self.store_results((self.tests, results))
+
+# repo['tip']['yt/lagos/__init__.py'].data()

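Putting the pieces together, a Problem binds a simulation to a set of named
tests and commits the pickled results to a mercurial repository; a sketch
(the parameter file name and repository path are hypothetical):

    from yt.extensions.enzo_test import *

    p = Problem("my_problem", "MySimulation.par", repo_path="./test_repo")
    p.add_halo_profiles("hp_200", n_part=200)
    p.add_rho_temp_pdf("pdf")
    p()   # runs all tests on each output, commits results_my_problem.cpkl
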
Added: trunk/yt/extensions/enzo_test/ProblemVerification.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/enzo_test/ProblemVerification.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,78 @@
+"""
+This is a definition of a set of classes for defining problem verification
+methods, along with storage mechanisms.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2007-2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.mods import *
+
+class VerificationMechanism(object):
+    def __init__(self):
+        pass
+
+    def __call__(self, pf, plot):
+        return self.run(pf)
+
+class ProfileVerification(VerificationMechanism):
+    def __init__(self, q1, q2,
+                 q1_limits = None, q1_nbins = 64):
+        VerificationMechanism.__init__(self)
+        self.q1 = q1
+        self.q2 = q2
+        self.q1_limits = q1_limits
+        self.q1_nbins = q1_nbins
+
+    def _setup_profile(self):
+        pass
+
+    def run(self, data):
+        limits = self.q1_limits
+        if limits is None:
+            limits = data.quantities["Extrema"](
+                self.q1, lazy_reader=True)[0]
+        prof = BinnedProfile1D(
+            data, self.q1_nbins, self.q1,
+            limits[0], limits[1], lazy_reader=True)
+        prof.add_fields(self.q2)
+        return na.array([prof[self.q1], prof[self.q2]])
+
+class RadialProfileVerification(VerificationMechanism):
+    def __init__(self, radius, unit, q2,
+                 r_limits = None, q2_limits = None,
+                 r_nbins = 64, q2_nbins = 64):
+        VerificationMechanism.__init__(self)
+        self.radius = radius
+        self.unit = unit
+        self.q2 = q2
+        self.r_limits = r_limits
+        self.q2_limits = q2_limits
+        self.r_nbins = r_nbins
+        self.q2_nbins = q2_nbins
+
+class TotalMassVerification(VerificationMechanism):
+    def __init__(self):
+        pass
+
+    def run(self, data):
+        return data.quantities["TotalQuantity"](
+                "CellMassMsun", lazy_reader=True)

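Each mechanism is a callable taking a data object; for instance (sketch only,
with "pf" an already-loaded output):

    verifier = ProfileVerification("Density", "Temperature", q1_nbins=32)
    profile = verifier(pf.h.all_data(), plot=False)   # 2xN array: bins, values
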
Added: trunk/yt/extensions/enzo_test/SimulationTests.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/enzo_test/SimulationTests.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,70 @@
+"""
+This is a definition of a class for actually testing a result
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2007-2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.mods import *
+from ProblemVerification import *
+from DataProviders import *
+test_registry = {}
+
+class TestSimulation(object):
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if not hasattr(cls,'_ttype_name'): return
+            test_registry[cls._ttype_name] = cls
+
+    def __init__(self, name, source, verifier):
+        self.name = name
+        self.source = source
+        self.verifier = verifier
+
+    def __call__(self, pf, plot=True):
+        results = [self.verifier(d, plot)
+            for d in self.source(pf)]
+        return results
+
+class HaloProfiles(TestSimulation):
+    _ttype_name = "halo_profiles"
+    def __init__(self, name, n_part=200):
+        source = HaloSpheres(n_part=n_part)
+        verifier = ProfileVerification(
+            "Density", "Temperature", q1_nbins=32)
+        TestSimulation.__init__(self, name, source, verifier)
+
+class HaloBaryonMasses(TestSimulation):
+    _ttype_name = "halo_baryon_masses"
+    def __init__(self, name, n_part=200):
+        source = HaloSpheres(n_part=n_part)
+        verifier = TotalMassVerification()
+        TestSimulation.__init__(self, name, source, verifier)
+
+class RhoTempPDF(TestSimulation):
+    _ttype_name = "rho_temp_pdf"
+    def __init__(self, name):
+        source = EntireDataset()
+        verifier = ProfileVerification(
+            "Density", "Temperature", q1_nbins=32)
+        TestSimulation.__init__(self, name, source, verifier)
+

Added: trunk/yt/extensions/enzo_test/__init__.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/enzo_test/__init__.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,29 @@
+"""
+This is a definition of a class for defining problem verification.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2007-2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from ProblemVerification import *
+from DataProviders import *
+from SimulationTests import *
+from ProblemSetup import *

Modified: trunk/yt/extensions/setup.py
==============================================================================
--- trunk/yt/extensions/setup.py	(original)
+++ trunk/yt/extensions/setup.py	Mon Oct 19 18:15:42 2009
@@ -7,4 +7,5 @@
     config.make_config_py() # installs __config__.py
     config.make_svn_version_py()
     config.add_subpackage("lightcone")
+    config.add_subpackage("volume_rendering")
     return config

Added: trunk/yt/extensions/volume_rendering/FixedInterpolator.c
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/FixedInterpolator.c	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,74 @@
+/************************************************************************
+* Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+*
+* This file is part of yt.
+*
+* yt is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 3 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*
+************************************************************************/
+
+
+//
+// A small, tiny, itty bitty module for computation-intensive interpolation
+// that I can't seem to make fast in Cython
+//
+
+#include "FixedInterpolator.h"
+
+#define VINDEX(A,B,C) data[((((A)+ci[0])*(ds[1]+1)+((B)+ci[1]))*(ds[2]+1)+ci[2]+(C))]
+//  (((C*ds[1])+B)*ds[0]+A)
+
+npy_float64 fast_interpolate(int *ds, int *ci, npy_float64 *dp,
+                             npy_float64 *data)
+{
+    int i;
+    npy_float64 dv, dm[3];
+    for(i=0;i<3;i++)dm[i] = (1.0 - dp[i]);
+    dv  = 0.0;
+    dv += VINDEX(0,0,0) * (dm[0]*dm[1]*dm[2]);
+    dv += VINDEX(0,0,1) * (dm[0]*dm[1]*dp[2]);
+    dv += VINDEX(0,1,0) * (dm[0]*dp[1]*dm[2]);
+    dv += VINDEX(0,1,1) * (dm[0]*dp[1]*dp[2]);
+    dv += VINDEX(1,0,0) * (dp[0]*dm[1]*dm[2]);
+    dv += VINDEX(1,0,1) * (dp[0]*dm[1]*dp[2]);
+    dv += VINDEX(1,1,0) * (dp[0]*dp[1]*dm[2]);
+    dv += VINDEX(1,1,1) * (dp[0]*dp[1]*dp[2]);
+    /*assert(dv < -20);*/
+    return dv;
+}
+
+npy_float64 trilinear_interpolate(int *ds, int *ci, npy_float64 *dp,
+				  npy_float64 *data)
+{
+    /* dims is one less than the dimensions of the array */
+    int i;
+    npy_float64 dm[3], vz[4];
+  //dp is the distance to the plane.  dm is val, dp = 1-val
+    for(i=0;i<3;i++)dm[i] = (1.0 - dp[i]);
+    
+  //First interpolate in z
+    vz[0] = dm[2]*VINDEX(0,0,0) + dp[2]*VINDEX(0,0,1);
+    vz[1] = dm[2]*VINDEX(0,1,0) + dp[2]*VINDEX(0,1,1);
+    vz[2] = dm[2]*VINDEX(1,0,0) + dp[2]*VINDEX(1,0,1);
+    vz[3] = dm[2]*VINDEX(1,1,0) + dp[2]*VINDEX(1,1,1);
+
+  //Then in y
+    vz[0] = dm[1]*vz[0] + dp[1]*vz[1];
+    vz[1] = dm[1]*vz[2] + dp[1]*vz[3];
+
+  //Then in x
+    vz[0] = dm[0]*vz[0] + dp[0]*vz[1];
+    /*assert(dv < -20);*/
+    return vz[0];
+}

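For clarity, a NumPy sketch of what trilinear_interpolate computes, where dp
is the in-cell offset in [0,1]^3 and ci the lower corner index into the
vertex-centered array (this mirrors the C routine, it does not replace it):

    import numpy as np

    def trilinear(data, ci, dp):
        dm = 1.0 - np.asarray(dp)
        # Collapse the eight corner values to four along z ...
        vz = [dm[2] * data[ci[0]+i, ci[1]+j, ci[2]] +
              dp[2] * data[ci[0]+i, ci[1]+j, ci[2]+1]
              for i in (0, 1) for j in (0, 1)]
        # ... then to two along y, then to one along x.
        vy0 = dm[1]*vz[0] + dp[1]*vz[1]
        vy1 = dm[1]*vz[2] + dp[1]*vz[3]
        return dm[0]*vy0 + dp[0]*vy1
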
Added: trunk/yt/extensions/volume_rendering/FixedInterpolator.h
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/FixedInterpolator.h	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,40 @@
+/************************************************************************
+* Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+*
+* This file is part of yt.
+*
+* yt is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 3 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*
+************************************************************************/
+
+
+//
+// A small, tiny, itty bitty module for computation-intensive interpolation
+// that I can't seem to make fast in Cython
+//
+
+#include "Python.h"
+
+#include <stdio.h>
+#include <math.h>
+#include <signal.h>
+#include <ctype.h>
+
+#include "numpy/ndarrayobject.h"
+
+npy_float64 fast_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
+                             npy_float64 *data);
+
+npy_float64 trilinear_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
+				  npy_float64 *data);

Added: trunk/yt/extensions/volume_rendering/TransferFunction.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/TransferFunction.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,89 @@
+"""
+Simple transfer function editor
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+
+class TransferFunction(object):
+    def __init__(self, x_bounds, nbins=256):
+        self.nbins = nbins
+        self.x_bounds = x_bounds
+        self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
+        self.y = na.zeros(nbins, dtype='float64')
+
+    def add_gaussian(self, location, width, height):
+        vals = height * na.exp(-(self.x - location)**2.0/width)
+        self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+
+    def add_line(self, start, stop):
+        x0, y0 = start
+        x1, y1 = stop
+        slope = (y1-y0)/(x1-x0)
+        vals = na.zeros(self.x.shape, 'float64')
+        vals[(self.x >= x0) & (self.x <= x1)] = \
+            slope * (self.x - x0) + y0
+        self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+
+    def plot(self, filename):
+        import matplotlib;matplotlib.use("Agg");import pylab
+        pylab.clf()
+        pylab.plot(self.x, self.y, 'xk-')
+        pylab.xlim(*self.x_bounds)
+        pylab.ylim(0.0, 1.0)
+        pylab.savefig(filename)
+
+class ColorTransferFunction(object):
+    def __init__(self, x_bounds, nbins=256):
+        self.x_bounds = x_bounds
+        self.nbins = nbins
+        self.red = TransferFunction(x_bounds, nbins)
+        self.green = TransferFunction(x_bounds, nbins)
+        self.blue = TransferFunction(x_bounds, nbins)
+        self.alpha = TransferFunction(x_bounds, nbins)
+        self.funcs = (self.red, self.green, self.blue, self.alpha)
+
+    def add_gaussian(self, location, width, height):
+        for tf, v in zip(self.funcs, height):
+            tf.add_gaussian(location, width, v)
+
+    def plot(self, filename):
+        import matplotlib;matplotlib.use("Agg");import pylab
+        pylab.clf()
+        for c,tf in zip(['r','g','b'], self.funcs):
+            pylab.plot(tf.x, tf.y, '-' + c)
+            pylab.fill(tf.x, tf.y, c, alpha=0.2)
+        pylab.plot(self.alpha.x, self.alpha.y, '-k')
+        pylab.fill(self.alpha.x, self.alpha.y, 'k', alpha=0.1)
+        pylab.xlim(*self.x_bounds)
+        pylab.ylim(0.0, 1.0)
+        pylab.xlabel("Value")
+        pylab.ylabel("Transmission")
+        pylab.savefig(filename)
+
+if __name__ == "__main__":
+    tf = ColorTransferFunction((-20, -5))
+    tf.add_gaussian(-16.0, 0.4, [0.2, 0.3, 0.1])
+    tf.add_gaussian(-14.0, 0.8, [0.4, 0.1, 0.2])
+    tf.add_gaussian(-10.0, 1.0, [0.0, 0.0, 1.0])
+    tf.plot("tf.png")

Added: trunk/yt/extensions/volume_rendering/VolumeIntegrator.pyx
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/VolumeIntegrator.pyx	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,384 @@
+"""
+Simple integrators for the radiative transfer equation
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from stdlib cimport malloc, free, abs
+
+cdef inline int imax(int i0, int i1):
+    if i0 > i1: return i0
+    return i1
+
+cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
+    if f0 > f1: return f0
+    return f1
+
+cdef inline int imin(int i0, int i1):
+    if i0 < i1: return i0
+    return i1
+
+cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
+    if f0 < f1: return f0
+    return f1
+
+cdef inline int iclip(int i, int a, int b):
+    return imin(imax(i, a), b)
+
+cdef inline np.float64_t fclip(np.float64_t f,
+                      np.float64_t a, np.float64_t b):
+    return fmin(fmax(f, a), b)
+
+cdef extern from "math.h":
+    double exp(double x)
+    float expf(float x)
+    double floor(double x)
+    double ceil(double x)
+    double fmod(double x, double y)
+
+cdef extern from "FixedInterpolator.h":
+    np.float64_t fast_interpolate(int *ds, int *ci, np.float64_t *dp,
+                                  np.float64_t *data)
+cdef extern from "FixedInterpolator.h":
+    np.float64_t trilinear_interpolate(int *ds, int *ci, np.float64_t *dp,
+                                       np.float64_t *data)
+
+cdef class VectorPlane
+
+cdef class TransferFunctionProxy:
+    cdef np.float64_t x_bounds[2]
+    cdef np.float64_t *vs[4]
+    cdef int nbins
+    cdef np.float64_t dbin
+    cdef public object tf_obj
+    def __cinit__(self, tf_obj):
+        self.tf_obj = tf_obj
+        cdef np.ndarray[np.float64_t, ndim=1] temp
+        temp = tf_obj.red.y
+        self.vs[0] = <np.float64_t *> temp.data
+        temp = tf_obj.green.y
+        self.vs[1] = <np.float64_t *> temp.data
+        temp = tf_obj.blue.y
+        self.vs[2] = <np.float64_t *> temp.data
+        temp = tf_obj.alpha.y
+        self.vs[3] = <np.float64_t *> temp.data
+        self.x_bounds[0] = tf_obj.x_bounds[0]
+        self.x_bounds[1] = tf_obj.x_bounds[1]
+        self.nbins = tf_obj.nbins
+        self.dbin = (self.x_bounds[1] - self.x_bounds[0])/self.nbins
+
+    @cython.profile(True)
+    cdef void eval_transfer(self, np.float64_t dv[5], np.float64_t dt,
+                            np.float64_t *rgba):
+        cdef int i
+        cdef int bin_id, dti
+        cdef np.float64_t tf, trgba[4], bv, dx, dy, dd, ta
+        dx = self.dbin
+
+        for dti in range(0,4):
+            # get source alpha first
+            # First locate our points
+            bin_id = iclip(<int> floor((dv[dti] - self.x_bounds[0]) / dx),
+                            0, self.nbins-2)
+                # Recall that linear interpolation is y0 + (x-x0) * dy/dx
+            bv = self.vs[3][bin_id] # This is x0
+            dy = self.vs[3][bin_id+1]-bv # dy
+            dd = dv[dti]-(self.x_bounds[0] + bin_id * dx) # x - x0
+                # This is our final value for transfer function on the entering face
+            tf = bv+dd*(dy/dx) 
+            ta = tf  # Store the source alpha
+            for i in range(3):
+                # Recall that linear interpolation is y0 + (x-x0) * dy/dx
+                bv = self.vs[i][bin_id] # This is x0
+                dy = self.vs[i][bin_id+1]-bv # dy
+                dd = dv[dti]-(self.x_bounds[0] + bin_id * dx) # x - x0
+                # This is our final value for transfer function on the entering face
+                tf = bv+dd*(dy/dx) 
+                # alpha blending
+                rgba[i] += (1. - rgba[3])*ta*tf*dt
+            #update alpha
+            rgba[3] += (1. - rgba[3])*ta*dt
+            
+        # We should really do some alpha blending.
+        # Front to back blending is defined as:
+        #  dst.rgb = dst.rgb + (1 - dst.a) * src.a * src.rgb
+        #  dst.a   = dst.a   + (1 - dst.a) * src.a     
+
+cdef class VectorPlane:
+    cdef public object avp_pos, avp_dir, acenter, aimage
+    cdef np.float64_t *vp_pos, *vp_dir, *center, *image
+    cdef np.float64_t pdx, pdy, bounds[4]
+    cdef int nv
+    cdef public object ax_vec, ay_vec
+    cdef np.float64_t *x_vec, *y_vec
+
+    def __cinit__(self, 
+                  np.ndarray[np.float64_t, ndim=3] vp_pos,
+                  np.ndarray[np.float64_t, ndim=1] vp_dir,
+                  np.ndarray[np.float64_t, ndim=1] center,
+                  bounds,
+                  np.ndarray[np.float64_t, ndim=3] image,
+                  np.ndarray[np.float64_t, ndim=1] x_vec,
+                  np.ndarray[np.float64_t, ndim=1] y_vec):
+        cdef int i, j
+        self.avp_pos = vp_pos
+        self.avp_dir = vp_dir
+        self.acenter = center
+        self.aimage = image
+        self.ax_vec = x_vec
+        self.ay_vec = y_vec
+        self.vp_pos = <np.float64_t *> vp_pos.data
+        self.vp_dir = <np.float64_t *> vp_dir.data
+        self.center = <np.float64_t *> center.data
+        self.image = <np.float64_t *> image.data
+        self.x_vec = <np.float64_t *> x_vec.data
+        self.y_vec = <np.float64_t *> y_vec.data
+        self.nv = vp_pos.shape[0]
+        for i in range(4): self.bounds[i] = bounds[i]
+        self.pdx = (self.bounds[1] - self.bounds[0])/self.nv
+        self.pdy = (self.bounds[3] - self.bounds[2])/self.nv
+
+    @cython.profile(True)
+    cdef void get_start_stop(self, np.float64_t *ex, int *rv):
+        # Extrema need to be re-centered
+        cdef np.float64_t cx, cy
+        cx = cy = 0.0
+        for i in range(3):
+            cx += self.center[i] * self.x_vec[i]
+            cy += self.center[i] * self.y_vec[i]
+        rv[0] = <int> floor((ex[0] - cx - self.bounds[0])/self.pdx)
+        rv[1] = rv[0] + <int> ceil((ex[1] - ex[0])/self.pdx)
+        rv[2] = <int> floor((ex[2] - cy - self.bounds[2])/self.pdy)
+        rv[3] = rv[2] + <int> ceil((ex[3] - ex[2])/self.pdy)
+
+    cdef inline void copy_into(self, np.float64_t *fv, np.float64_t *tv,
+                        int i, int j, int nk):
+        # We know the first two dimensions of our from-vector, and our
+        # to-vector is flat and 'nk' long
+        cdef int k
+        for k in range(nk):
+            tv[k] = fv[(((k*self.nv)+j)*self.nv+i)]
+
+    cdef inline void copy_back(self, np.float64_t *fv, np.float64_t *tv,
+                        int i, int j, int nk):
+        cdef int k
+        for k in range(nk):
+            tv[(((k*self.nv)+j)*self.nv+i)] = fv[k]
+
+cdef class PartitionedGrid:
+    cdef public object my_data
+    cdef public object LeftEdge
+    cdef public object RightEdge
+    cdef np.float64_t *data
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+    cdef np.float64_t dds[3]
+    cdef int dims[3]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def __cinit__(self,
+                  np.ndarray[np.float64_t, ndim=3] data,
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  np.ndarray[np.int64_t, ndim=1] dims):
+        # The data is likely brought in via a slice, so we copy it
+        cdef int i, j, k, size
+        self.LeftEdge = left_edge
+        self.RightEdge = right_edge
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.right_edge[i] = right_edge[i]
+            self.dims[i] = dims[i]
+            self.dds[i] = (self.right_edge[i] - self.left_edge[i])/dims[i]
+        self.my_data = data
+        self.data = <np.float64_t*> data.data
+        
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.profile(True)
+    def cast_plane(self, TransferFunctionProxy tf, VectorPlane vp):
+        # This routine will iterate over all of the vectors and cast each in
+        # turn.  Might benefit from a more sophisticated intersection check,
+        # like http://courses.csusm.edu/cs697exz/ray_box.htm
+        cdef int vi, vj, hit, i, ni, nj, nn
+        cdef int iter[4]
+        cdef np.float64_t v_pos[3], v_dir[3], rgba[4], extrema[4]
+        self.calculate_extent(vp, extrema)
+        vp.get_start_stop(extrema, iter)
+        for i in range(4): iter[i] = iclip(iter[i], 0, vp.nv)
+        hit = 0
+        for vj in range(iter[0], iter[1]):
+            for vi in range(iter[2], iter[3]):
+                vp.copy_into(vp.vp_pos, v_pos, vi, vj, 3)
+                vp.copy_into(vp.image, rgba, vi, vj, 4)
+                hit += self.integrate_ray(v_pos, vp.vp_dir, rgba, tf)
+                vp.copy_back(rgba, vp.image, vi, vj, 4)
+        return hit
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.profile(True)
+    cdef void calculate_extent(self, VectorPlane vp,
+                               np.float64_t extrema[4]):
+        # We do this for all eight corners
+        cdef np.float64_t *edges[2], temp
+        edges[0] = self.left_edge
+        edges[1] = self.right_edge
+        extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
+        cdef int i, j, k
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    # This should rotate it into the vector plane
+                    temp  = edges[i][0] * vp.x_vec[0]
+                    temp += edges[j][1] * vp.x_vec[1]
+                    temp += edges[k][2] * vp.x_vec[2]
+                    if temp < extrema[0]: extrema[0] = temp
+                    if temp > extrema[1]: extrema[1] = temp
+                    temp  = edges[i][0] * vp.y_vec[0]
+                    temp += edges[j][1] * vp.y_vec[1]
+                    temp += edges[k][2] * vp.y_vec[2]
+                    if temp < extrema[2]: extrema[2] = temp
+                    if temp > extrema[3]: extrema[3] = temp
+        #print extrema[0], extrema[1], extrema[2], extrema[3]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.profile(True)
+    cdef int integrate_ray(self, np.float64_t v_pos[3],
+                                 np.float64_t v_dir[3],
+                                 np.float64_t rgba[4],
+                                 TransferFunctionProxy tf):
+        cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
+        cdef np.float64_t intersect_t = 1.0
+        cdef np.float64_t intersect[3], tmax[3], tdelta[3]
+        cdef np.float64_t enter_t, dist, alpha, dt
+        cdef np.float64_t tr, tl, temp_x, temp_y, dv[5]
+        for i in range(3):
+            if (v_dir[i] < 0):
+                step[i] = -1
+            else:
+                step[i] = 1
+            x = (i+1) % 3
+            y = (i+2) % 3
+            tl = (self.left_edge[i] - v_pos[i])/v_dir[i]
+            tr = (self.right_edge[i] - v_pos[i])/v_dir[i]
+            temp_x = (v_pos[x] + tl*v_dir[x])
+            temp_y = (v_pos[y] + tl*v_dir[y])
+            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
+               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
+               0.0 <= tl and tl < intersect_t:
+                direction = i
+                intersect_t = tl
+            temp_x = (v_pos[x] + tr*v_dir[x])
+            temp_y = (v_pos[y] + tr*v_dir[y])
+            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
+               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
+               0.0 <= tr and tr < intersect_t:
+                direction = i
+                intersect_t = tr
+        if self.left_edge[0] <= v_pos[0] and v_pos[0] <= self.right_edge[0] and \
+           self.left_edge[1] <= v_pos[1] and v_pos[1] <= self.right_edge[1] and \
+           self.left_edge[2] <= v_pos[2] and v_pos[2] <= self.right_edge[2]:
+            intersect_t = 0.0
+        if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+        for i in range(3):
+            intersect[i] = v_pos[i] + intersect_t * v_dir[i]
+            cur_ind[i] = <int> floor((intersect[i] +
+                                      step[i]*1e-8*self.dds[i] -
+                                      self.left_edge[i])/self.dds[i])
+            tmax[i] = (((cur_ind[i]+step[i])*self.dds[i])+
+                        self.left_edge[i]-v_pos[i])/v_dir[i]
+            if cur_ind[i] == self.dims[i] and step[i] < 0:
+                cur_ind[i] = self.dims[i] - 1
+            if cur_ind[i] < 0 or cur_ind[i] >= self.dims[i]: return 0
+            if step[i] > 0:
+                tmax[i] = (((cur_ind[i]+1)*self.dds[i])
+                            +self.left_edge[i]-v_pos[i])/v_dir[i]
+            if step[i] < 0:
+                tmax[i] = (((cur_ind[i]+0)*self.dds[i])
+                            +self.left_edge[i]-v_pos[i])/v_dir[i]
+            tdelta[i] = (self.dds[i]/v_dir[i])
+            if tdelta[i] < 0: tdelta[i] *= -1
+        # We have to jumpstart our calculation
+        hit = 0
+        enter_t = intersect_t
+        while 1:
+            # dims here is one less than the dimensions of the data,
+            # but we are tracing on the grid, not on the data...
+            if (not (0 <= cur_ind[0] < self.dims[0])) or \
+               (not (0 <= cur_ind[1] < self.dims[1])) or \
+               (not (0 <= cur_ind[2] < self.dims[2])):
+                break
+            hit += 1
+            if tmax[0] < tmax[1]:
+                if tmax[0] < tmax[2]:
+                    self.sample_values(v_pos, v_dir, enter_t, tmax[0], dv, cur_ind)
+                    cur_ind[0] += step[0]
+                    dt = fmin(tmax[0], 1.0) - enter_t
+                    enter_t = tmax[0]
+                    tmax[0] += tdelta[0]
+                else:
+                    self.sample_values(v_pos, v_dir, enter_t, tmax[2], dv, cur_ind)
+                    cur_ind[2] += step[2]
+                    dt = fmin(tmax[2], 1.0) - enter_t
+                    enter_t = tmax[2]
+                    tmax[2] += tdelta[2]
+            else:
+                if tmax[1] < tmax[2]:
+                    self.sample_values(v_pos, v_dir, enter_t, tmax[1], dv, cur_ind)
+                    cur_ind[1] += step[1]
+                    dt = fmin(tmax[1], 1.0) - enter_t
+                    enter_t = tmax[1]
+                    tmax[1] += tdelta[1]
+                else:
+                    self.sample_values(v_pos, v_dir, enter_t, tmax[2], dv, cur_ind)
+                    cur_ind[2] += step[2]
+                    dt = fmin(tmax[2], 1.0) - enter_t
+                    enter_t = tmax[2]
+                    tmax[2] += tdelta[2]
+            tf.eval_transfer(dv, dt, rgba)
+            if enter_t > 1.0: break
+        return hit
+
+    @cython.profile(True)
+    cdef void sample_values(self,
+                            np.float64_t v_pos[3],
+                            np.float64_t v_dir[3],
+                            np.float64_t enter_t,
+                            np.float64_t exit_t,
+                            np.float64_t dv[5],
+                            int ci[3]):
+        cdef np.float64_t cp[3], dp[3], temp, t, dt
+        cdef int dti, i
+        dt = (exit_t - enter_t) / 4.0 # five samples, so divide by four
+        #t = fclip(t, 0.0, 1.0)
+        for dti in range(5):
+            t = enter_t + dt * dti
+            for i in range(3):
+                cp[i] = v_pos[i] + t * v_dir[i]
+                dp[i] = fclip(fmod(cp[i], self.dds[i])/self.dds[i], 0, 1.0)
+            dv[dti] = trilinear_interpolate(self.dims, ci, dp, self.data)

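The compositing in eval_transfer is the standard front-to-back blending noted
in its comments; in plain Python the per-sample update amounts to (a sketch,
with dt playing the role of the path length through the sample interval):

    def blend_front_to_back(dst_rgba, src_rgb, src_alpha, dt):
        for i in range(3):
            dst_rgba[i] += (1.0 - dst_rgba[3]) * src_alpha * src_rgb[i] * dt
        dst_rgba[3] += (1.0 - dst_rgba[3]) * src_alpha * dt
        return dst_rgba
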
Added: trunk/yt/extensions/volume_rendering/__init__.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/__init__.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,34 @@
+"""
+Import the components of the volume rendering extension
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+
+from TransferFunction import TransferFunction, ColorTransferFunction
+from VolumeIntegrator import PartitionedGrid, VectorPlane, \
+                             TransferFunctionProxy
+from grid_partitioner import partition_all_grids, partition_grid, \
+                             export_partitioned_grids, \
+                             import_partitioned_grids
+from software_sampler import direct_ray_cast

Added: trunk/yt/extensions/volume_rendering/grid_partitioner.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/grid_partitioner.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,144 @@
+"""
+Import the components of the volume rendering extension
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+from yt.funcs import *
+import h5py
+
+from VolumeIntegrator import PartitionedGrid
+
+def partition_grid(start_grid, field, log_field = True, threshold = None):
+    if threshold is not None:
+        if start_grid[field].max() < threshold[0] or \
+           start_grid[field].min() > threshold[1]: return None
+    to_cut_up = start_grid.get_vertex_centered_data(field).astype('float64')
+
+    if log_field: to_cut_up = na.log10(to_cut_up)
+    assert(na.any(na.isnan(to_cut_up)) == False)
+
+    if len(start_grid.Children) == 0:
+        pg = PartitionedGrid(
+                to_cut_up.copy(),
+                na.array(start_grid.LeftEdge, dtype='float64'),
+                na.array(start_grid.RightEdge, dtype='float64'),
+                na.array(start_grid.ActiveDimensions, dtype='int64'))
+        return [pg]
+
+    x_vert = [0, start_grid.ActiveDimensions[0]]
+    y_vert = [0, start_grid.ActiveDimensions[1]]
+    z_vert = [0, start_grid.ActiveDimensions[2]]
+
+    for grid in start_grid.Children:
+        gi = start_grid.get_global_startindex()
+        si = grid.get_global_startindex()/2 - gi
+        ei = si + grid.ActiveDimensions/2 
+        x_vert += [si[0], ei[0]]
+        y_vert += [si[1], ei[1]]
+        z_vert += [si[2], ei[2]]
+
+    cim = start_grid.child_index_mask
+
+    # Now we sort by our vertices, in axis order
+
+    x_vert.sort()
+    y_vert.sort()
+    z_vert.sort()
+
+    grids = []
+
+    covered = na.zeros(start_grid.ActiveDimensions)
+    for xs, xe in zip(x_vert[:-1], x_vert[1:]):
+        for ys, ye in zip(y_vert[:-1], y_vert[1:]):
+            for zs, ze in zip(z_vert[:-1], z_vert[1:]):
+                sl = (slice(xs, xe), slice(ys, ye), slice(zs, ze))
+                dd = cim[sl]
+                if dd.size == 0: continue
+                uniq = na.unique(dd)
+                if uniq.size > 1: continue
+                if uniq[0] > -1: continue
+                data = to_cut_up[xs:xe+1,ys:ye+1,zs:ze+1].copy()
+                dims = na.array(dd.shape, dtype='int64')
+                start_index = na.array([xs,ys,zs], dtype='int64')
+                left_edge = start_grid.LeftEdge + start_index * start_grid.dds
+                right_edge = left_edge + dims * start_grid.dds
+                grids.append(PartitionedGrid(
+                    data, left_edge, right_edge, dims))
+                covered[xs:xe,ys:ye,zs:ze] += 1
+    assert(na.all(covered == start_grid.child_mask))
+    assert(covered.max() <= 1)
+
+    return grids
+
+def partition_all_grids(grid_list, field = "Density",
+                        threshold = (-1e300, 1e300), eval_func = None):
+    new_grids = []
+    pbar = get_pbar("Partitioning ", len(grid_list))
+    if eval_func is None: eval_func = lambda a: True
+    for i, g in enumerate(grid_list):
+        if not eval_func(g): continue
+        pbar.update(i)
+        to_add = partition_grid(g, field, True, threshold)
+        if to_add is not None: new_grids += to_add
+    pbar.finish()
+    return na.array(new_grids, dtype='object')
+
+def export_partitioned_grids(grid_list, fn):
+    f = h5py.File(fn, "w")
+    pbar = get_pbar("Writing Grids", len(grid_list))
+    nelem = sum((grid.my_data.size for grid in grid_list))
+    ngrids = len(grid_list)
+    group = f.create_group("/PGrids")
+    left_edge = na.concatenate([[grid.LeftEdge,] for grid in grid_list])
+    f.create_dataset("/PGrids/LeftEdges", data=left_edge); del left_edge
+    right_edge = na.concatenate([[grid.RightEdge,] for grid in grid_list])
+    f.create_dataset("/PGrids/RightEdges", data=right_edge); del right_edge
+    dims = na.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
+    f.create_dataset("/PGrids/Dims", data=dims); del dims
+    data = na.concatenate([grid.my_data.ravel() for grid in grid_list])
+    f.create_dataset("/PGrids/Data", data=data); del data
+    f.close()
+    pbar.finish()
+
+def import_partitioned_grids(fn):
+    f = h5py.File(fn, "r")
+    n_groups = len(f.listnames())
+    grid_list = []
+    dims = f["/PGrids/Dims"][:]
+    left_edges = f["/PGrids/LeftEdges"][:]
+    right_edges = f["/PGrids/RightEdges"][:]
+    data = f["/PGrids/Data"][:]
+    pbar = get_pbar("Reading Grids", dims.shape[0])
+    curpos = 0
+    for i in xrange(dims.shape[0]):
+        gd = dims[i,:]
+        gle, gre = left_edges[i,:], right_edges[i,:]
+        gdata = data[curpos:curpos+gd.prod()].reshape(gd)
+        # Vertex-centered -> cell-centered, so we subtract 1 from dims here
+        grid_list.append(PartitionedGrid(gdata, gle, gre, gd - 1))
+        curpos += gd.prod()
+        pbar.update(i)
+    pbar.finish()
+    f.close()
+    return na.array(grid_list, dtype='object')

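The export/import pair lets the (comparatively expensive) partitioning step be
done once and reused across renders; a sketch (the file name is arbitrary):

    grids = partition_all_grids(pf.h.grids, field="Density")
    export_partitioned_grids(grids, "partitioned_grids.h5")
    # ... later, or in another process ...
    grids = import_partitioned_grids("partitioned_grids.h5")
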
Added: trunk/yt/extensions/volume_rendering/setup.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/setup.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+import setuptools
+import os, sys, os.path
+
+import os.path
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('volume_rendering',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
+    config.make_svn_version_py()
+    config.add_extension("VolumeIntegrator",
+        ["yt/extensions/volume_rendering/VolumeIntegrator.c",
+         "yt/extensions/volume_rendering/FixedInterpolator.c",])
+    return config

Added: trunk/yt/extensions/volume_rendering/software_sampler.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/software_sampler.py	Mon Oct 19 18:15:42 2009
@@ -0,0 +1,85 @@
+"""
+Import the components of the volume rendering extension
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+import time
+from yt.extensions.volume_rendering import *
+from yt.funcs import *
+
+def direct_ray_cast(pf, L, center, W, Nvec, tf, 
+                    partitioned_grids = None):
+    center = na.array(center, dtype='float64')
+
+    # This just helps us keep track of stuff, and it's cheap
+    cp = pf.h.cutting(L, center)
+    back_center = center - cp._norm_vec * W
+    front_center = center + cp._norm_vec * W
+    cylinder = pf.h.disk(back_center, L, na.sqrt(2)*W, 2*W)
+
+    if partitioned_grids is None:
+        partitioned_grids = partition_all_grids(cylinder._grids,
+                                    eval_func = lambda g: na.any(cylinder._get_point_indices(g)))
+    #partitioned_grids = partition_all_grids(pf.h.grids)
+
+    LE = (na.array([grid.LeftEdge for grid in partitioned_grids]) - back_center) * cp._norm_vec
+    RE = (na.array([grid.RightEdge for grid in partitioned_grids]) - back_center) * cp._norm_vec
+    DL = na.sum(LE, axis=1); del LE
+    DR = na.sum(RE, axis=1); del RE
+    dist = na.minimum(DL, DR)
+    ind = na.argsort(dist)
+    
+    image = na.zeros((Nvec,Nvec,4), dtype='float64', order='F')
+    image[:,:,3] = 0.0
+
+    # Now we need to generate regular x,y,z values in regular space for our vector
+    # starting places.
+    px, py = na.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
+    xv = cp._inv_mat[0,0]*px + cp._inv_mat[0,1]*py + cp.center[0]
+    yv = cp._inv_mat[1,0]*px + cp._inv_mat[1,1]*py + cp.center[1]
+    zv = cp._inv_mat[2,0]*px + cp._inv_mat[2,1]*py + cp.center[2]
+    vectors = na.array([xv, yv, zv], dtype='float64').transpose()
+    vectors = vectors.copy('F')
+    xp0, xp1 = px.min(), px.max()
+    yp0, yp1 = py.min(), py.max()
+
+    ng = partitioned_grids.size
+    norm_vec = cp._norm_vec * (2.0*W)
+    every = na.ceil(len(partitioned_grids) / 100.0)
+
+    vp = VectorPlane(vectors, norm_vec, back_center,
+                     (xp0, xp1, yp0, yp1), image, cp._x_vec, cp._y_vec)
+
+    tfp = TransferFunctionProxy(tf)
+
+    pbar = get_pbar("Ray casting ", len(partitioned_grids))
+    for i,g in enumerate(partitioned_grids[ind]):
+        if (i % every) == 0: 
+            pbar.update(i)
+        pos = g.cast_plane(tfp, vp)
+    pbar.finish()
+
+    return partitioned_grids, image, vectors, norm_vec, pos

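A rough usage sketch for direct_ray_cast follows. The dataset path and the
TransferFunction constructor signature are assumptions; only the
direct_ray_cast call itself is taken from the code above.

    # Sketch: cast a 256x256 grid of rays along an arbitrary view axis.
    import numpy as na
    from yt.mods import EnzoStaticOutput
    from yt.extensions.volume_rendering import TransferFunction
    from yt.extensions.volume_rendering.software_sampler import direct_ray_cast

    pf = EnzoStaticOutput("DD0010/DD0010")     # assumed dataset path
    L = na.array([0.3, 0.5, 0.8])              # view direction
    center = na.array([0.5, 0.5, 0.5])         # center of the image plane
    W = 0.25                                   # half-width of the volume
    Nvec = 256                                 # image is Nvec x Nvec rays
    tf = TransferFunction((-31.0, -27.0))      # assumed bounds-style signature
    grids, image, vectors, norm_vec, pos = direct_ray_cast(
        pf, L, center, W, Nvec, tf)
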
Modified: trunk/yt/funcs.py
==============================================================================
--- trunk/yt/funcs.py	(original)
+++ trunk/yt/funcs.py	Mon Oct 19 18:15:42 2009
@@ -370,3 +370,9 @@
     sys.excepthook = rpdb.rpdb_excepthook
     del sys.argv[sys.argv.index("--rpdb")]
 
+#
+# Some exceptions
+#
+
+class NoCUDAException(Exception):
+    pass

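The new exception signals that a CUDA code path is unavailable; a minimal
sketch of the intended pattern (the pycuda import and this VolumeFinder
class are illustrative assumptions; compare the except clause added to
BaseDataTypes.py below):

    # Hypothetical CUDA-backed helper; raising NoCUDAException lets the
    # caller fall back to the CPU PointsInVolume path.
    from yt.funcs import NoCUDAException

    class VolumeFinder(object):
        def __init__(self, coord, dims):
            try:
                import pycuda.autoinit  # assumed CUDA entry point
            except ImportError:
                raise NoCUDAException("no usable CUDA runtime found")
            self.coord, self.dims = coord, dims
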
Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py	(original)
+++ trunk/yt/lagos/BaseDataTypes.py	Mon Oct 19 18:15:42 2009
@@ -160,6 +160,20 @@
         self.set_field_parameter("center",na.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",na.zeros(3,dtype='float64'))
 
+    def _set_center(self, center):
+        if center is None:
+            pass
+        elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
+            center = na.array(center)
+        elif center == "max": # is this dangerous for race conditions?
+            center = self.pf.h.find_max("Density")[1]
+        elif center.startswith("max_"):
+            center = self.pf.h.find_max(center[4:])[1]
+        else:
+            center = na.array(center, dtype='float64')
+        self.center = center
+        self.set_field_parameter('center', center)
+
     def get_field_parameter(self, name, default=None):
         """
         This is typically only used by derived field functions, but
@@ -201,6 +215,15 @@
             for grid in self._grids: grid.clear_data()
         self.data = {}
 
+    def clear_cache(self):
+        """
+        Clears out all cached cut masks, point indices and vertex-centered
+        data, freeing memory.
+        """
+        # These caches are dictionaries keyed by grid; deleting the loop
+        # variable would free nothing, so clear the containers themselves.
+        self._cut_masks.clear()
+        self._point_indices.clear()
+        for field in self._vc_data:
+            self._vc_data[field].clear()
+
     def has_key(self, key):
         """
         Checks if a data field already exists.
@@ -485,7 +508,7 @@
         self.end_point = na.array(end_point, dtype='float64')
         self.vec = self.end_point - self.start_point
         #self.vec /= na.sqrt(na.dot(self.vec, self.vec))
-        self.center = self.start_point
+        self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
         self._dts, self._ts = {}, {}
         #self._refresh_data()
@@ -687,8 +710,7 @@
         Optionally supply fields.
         """
         AMR2DData.__init__(self, axis, fields, pf, **kwargs)
-        self.center = center
-        if center is not None: self.set_field_parameter('center',center)
+        self._set_center(center)
         self.coord = coord
         if node_name is False:
             self._refresh_data()
@@ -836,7 +858,7 @@
         The 'up' direction is guessed at automatically.
         """
         AMR2DData.__init__(self, 4, fields, **kwargs)
-        self.center = center
+        self._set_center(center)
         self.set_field_parameter('center',center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
@@ -1007,6 +1029,12 @@
                       na.outer(_co[1,:,:], self._y_vec)
         self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
 
+        try:
+            import IIIPointsInVolumeCUDA as pvc
+            self._pv = pvc.VolumeFinder(self._coord, self.dims)
+        except (ImportError, NoCUDAException):
+            self._pv = None
+
         if node_name is False:
             self._refresh_data()
         else:
@@ -1073,7 +1101,42 @@
             raise SyntaxError("Making a fixed resolution slice with "
                               "particles isn't supported yet.")
 
-    @time_execution
+    def reslice(self, normal, center, width):
+
+        # Cleanup
+        del self._coord
+        del self._pixelmask
+
+        self.center = center
+        self.width = width
+        self.dds = self.width / self.dims
+        self.set_field_parameter('center', center)
+        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
+        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        # First we try all three, see which has the best result:
+        vecs = na.identity(3)
+        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        ax = _t.argmax()
+        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
+        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self.set_field_parameter('cp_x_vec',self._x_vec)
+        self.set_field_parameter('cp_y_vec',self._y_vec)
+        self.set_field_parameter('cp_z_vec',self._norm_vec)
+        # Calculate coordinates of each pixel
+        _co = self.dds * \
+              (na.mgrid[-self.dims/2 : self.dims/2,
+                        -self.dims/2 : self.dims/2] + 0.5)
+
+        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
+                      na.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+
+        self._refresh_data()
+        return
+
+    #@time_execution
     def get_data(self, fields = None):
         """
         Iterates over the list of fields and generates/reads them all.
@@ -1109,9 +1172,12 @@
 
     def _get_point_indices(self, grid):
         if self._pixelmask.max() == 0: return []
-        k = PV.PointsInVolume(self._coord, self._pixelmask,
-                              grid.LeftEdge, grid.RightEdge,
-                              grid.child_mask, just_one(grid['dx']))
+        if self._pv is not None:
+            k = self._pv(grid)
+        else:
+            k = PV.PointsInVolume(self._coord, self._pixelmask,
+                                  grid.LeftEdge, grid.RightEdge,
+                                  grid.child_mask, just_one(grid['dx']))
         return k
 
     def _gen_node_name(self):
@@ -1139,7 +1205,7 @@
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
         self._field_cuts = field_cuts
         self.serialize = serialize
-        self.center = center
+        self._set_center(center)
         if center is not None: self.set_field_parameter('center',center)
         self._node_name = node_name
         self._initialize_source(source)
@@ -1544,7 +1610,7 @@
         for fields and quantities that require it.
         """
         AMRData.__init__(self, pf, fields, **kwargs)
-        self.center = center
+        self._set_center(center)
         self.set_field_parameter("center",center)
         self.coords = None
         self._grids = None
@@ -2316,7 +2382,7 @@
     def _get_level_array(self, level, fields):
         fields = ensure_list(fields)
         # We assume refinement by a factor of two
-        rf = float(self.pf["RefineBy"]**(self.level - level))
+        rf = self.pf["RefineBy"]**(self.level - level)
         dims = na.maximum(1,self.ActiveDimensions/rf) + 2
         dx = (self.right_edge-self.left_edge)/(dims-2)
         x,y,z = (na.mgrid[0:dims[0],0:dims[1],0:dims[2]].astype('float64')-0.5)\

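The new _set_center helper accepts several center specifications; a short
illustrative sketch ('obj' stands for any AMRData-derived object):

    obj._set_center([0.5, 0.5, 0.5])     # explicit coordinates
    obj._set_center("max")               # location of the Density maximum
    obj._set_center("max_Temperature")   # maximum of an arbitrary field
    obj._set_center(None)                # leave the center unset
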
Modified: trunk/yt/lagos/DataReadingFuncs.py
==============================================================================
--- trunk/yt/lagos/DataReadingFuncs.py	(original)
+++ trunk/yt/lagos/DataReadingFuncs.py	Mon Oct 19 18:15:42 2009
@@ -111,8 +111,13 @@
     t = t.swapaxes(0,2)
     return t
 
-def readDataPacked(self, field):
-    return HDF5LightReader.ReadData(self.filename, "/Grid%08i/%s" % (self.id, field)).swapaxes(0,2)
+def readDataPacked(self, field, dspace=None):
+    if dspace is not None:
+        dspace="/Grid%08i/%s" % (self.id, dspace)
+        return HDF5LightReader.ReadData(self.filename,
+                "/Grid%08i/%s" % (self.id, field), dspace).swapaxes(0,2)
+    return HDF5LightReader.ReadData(self.filename,
+            "/Grid%08i/%s" % (self.id, field)).swapaxes(0,2)
 
 def readDataSlicePacked(self, grid, field, axis, coord):
     """

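With the optional dspace argument, a packed-AMR read can be restricted to
a stored dataset-region reference. A hedged sketch; the dataspace name
follows the AddressParticleType%02i convention used in UniversalFields.py
below, and the grid object is illustrative:

    # Read only the type-2 particles of this grid, assuming the file
    # stores a region reference named AddressParticleType02.
    mass = grid._read_data("particle_mass", dspace="AddressParticleType02")
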
Modified: trunk/yt/lagos/EnzoDefs.py
==============================================================================
--- trunk/yt/lagos/EnzoDefs.py	(original)
+++ trunk/yt/lagos/EnzoDefs.py	Mon Oct 19 18:15:42 2009
@@ -77,6 +77,7 @@
                  "TopGridDimensions": int,
                  "EOSSoundSpeed": float,
                  "EOSType": int,
+                 "NumberOfParticleAttributes": int,
                 }
 
 mpc_conversion = {'mpc'   : 1e0,

Modified: trunk/yt/lagos/HDF5LightReader.c
==============================================================================
--- trunk/yt/lagos/HDF5LightReader.c	(original)
+++ trunk/yt/lagos/HDF5LightReader.c	Mon Oct 19 18:15:42 2009
@@ -82,10 +82,12 @@
 {
     char *filename, *nodename;
 
+    char *dspacename = NULL;
     hsize_t *my_dims = NULL;
     hsize_t *my_max_dims = NULL;
     npy_intp *dims = NULL;
-    hid_t file_id, datatype_id, native_type_id, dataset, dataspace;
+    hid_t file_id, datatype_id, native_type_id, dataset, dataspace, dsetr,
+          memspace;
     herr_t my_error;
     htri_t file_exists;
     size_t type_size;
@@ -96,8 +98,8 @@
     file_id = datatype_id = native_type_id = dataset = 0;
     dataspace = 0;
 
-    if (!PyArg_ParseTuple(args, "ss",
-            &filename, &nodename))
+    if (!PyArg_ParseTuple(args, "ss|s",
+            &filename, &nodename, &dspacename))
         return PyErr_Format(_hdf5ReadError,
                "ReadHDF5DataSet: Invalid parameters.");
 
@@ -123,6 +125,12 @@
                  "ReadHDF5DataSet: Unable to open %s", filename);
         goto _fail;
     }
+    
+    dsetr = -1;
+    if(dspacename != NULL){
+      /*fprintf(stderr, "Getting dspace %s\n", dspacename);*/
+      dsetr = H5Dopen(file_id, dspacename);
+    }
 
     /* We turn off error reporting briefly, because it turns out that
        reading datasets with group names is more forgiving than finding
@@ -140,32 +148,56 @@
         goto _fail;
     }
 
-    dataspace = H5Dget_space(dataset);
-    if(dataspace < 0) {
+    if(dsetr >= 0) {
+      hdset_reg_ref_t reference[1];
+      my_error = H5Dread(dsetr, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, 
+          H5P_DEFAULT, reference);
+      if(my_error < 0) {
+        PyErr_Format(_hdf5ReadError,
+            "ReadHDF5DataSet: Unable to read particle reference (%s, %s, %s).",
+            filename, nodename, dspacename);
+        goto _fail;
+      }
+      H5Dclose(dsetr);
+      dataspace = H5Rget_region(file_id, H5R_DATASET_REGION, reference);
+      if(dataspace < 0) {
+        PyErr_Format(_hdf5ReadError,
+            "ReadHDF5DataSet: Unable to dereference particle dataspace (%s, %s).",
+            filename, nodename);
+        goto _fail;
+      }
+      my_rank = 1;
+      /* How do we keep this from leaking in failures? */
+      my_dims = malloc(sizeof(hsize_t) * my_rank);
+      my_max_dims = malloc(sizeof(hsize_t) * my_rank);
+      my_dims[0] = my_max_dims[0] = H5Sget_select_npoints(dataspace);
+    } else {
+      dataspace = H5Dget_space(dataset);
+      if(dataspace < 0) {
+        PyErr_Format(_hdf5ReadError,
+            "ReadHDF5DataSet: Unable to open dataspace (%s, %s).",
+            filename, nodename);
+        goto _fail;
+      }
+      my_rank = H5Sget_simple_extent_ndims( dataspace );
+      if(my_rank < 0) {
+        PyErr_Format(_hdf5ReadError,
+            "ReadHDF5DataSet: Problem getting dataset rank (%s, %s).",
+            filename, nodename);
+        goto _fail;
+      }
+
+      /* How do we keep this from leaking in failures? */
+      my_dims = malloc(sizeof(hsize_t) * my_rank);
+      my_max_dims = malloc(sizeof(hsize_t) * my_rank);
+      my_error = H5Sget_simple_extent_dims( dataspace, my_dims, my_max_dims );
+      if(my_error < 0) {
         PyErr_Format(_hdf5ReadError,
-                 "ReadHDF5DataSet: Unable to open dataspace (%s, %s).",
-                                    filename, nodename);
+            "ReadHDF5DataSet: Problem getting dataset dimensions (%s, %s).",
+            filename, nodename);
         goto _fail;
+      }
     }
-    my_rank = H5Sget_simple_extent_ndims( dataspace );
-    if(my_rank < 0) {
-        PyErr_Format(_hdf5ReadError,
-                 "ReadHDF5DataSet: Problem getting dataset rank (%s, %s).",
-                                    filename, nodename);
-        goto _fail;
-    }
-
-    /* How do we keep this from leaking in failures? */
-    my_dims = malloc(sizeof(hsize_t) * my_rank);
-    my_max_dims = malloc(sizeof(hsize_t) * my_rank);
-    my_error = H5Sget_simple_extent_dims( dataspace, my_dims, my_max_dims );
-    if(my_error < 0) {
-        PyErr_Format(_hdf5ReadError,
-                 "ReadHDF5DataSet: Problem getting dataset dimensions (%s, %s).",
-                                    filename, nodename);
-        goto _fail;
-    }
-
     dims = malloc(my_rank * sizeof(npy_intp));
     for (i = 0; i < my_rank; i++) dims[i] = (npy_intp) my_dims[i];
 
@@ -187,15 +219,23 @@
     if (!my_array) {
         PyErr_Format(_hdf5ReadError,
                  "ReadHDF5DataSet: Unable to create NumPy array.");
         goto _fail;
     }
+    memspace = H5Screate_simple(my_rank, my_dims, NULL); 
+    /*fprintf(stderr, "Total Selected: %s %d %d %d\n",
+            dspacename,
+            (int) H5Sget_select_npoints(memspace),
+            (int) H5Sget_select_npoints(dataspace),
+            (int) my_rank);*/
 
-    H5Dread(dataset, native_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, my_array->data);
+    H5Dread(dataset, native_type_id, memspace, dataspace, H5P_DEFAULT, my_array->data);
 
     PyArray_UpdateFlags(my_array, NPY_OWNDATA | my_array->flags);
     PyObject *return_value = Py_BuildValue("N", my_array);
 
     H5Sclose(dataspace);
+    H5Sclose(memspace);
     H5Dclose(dataset);
     H5Tclose(native_type_id);
     H5Tclose(datatype_id);

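From Python, the new optional third argument to ReadData names the dataset
holding the region reference. A sketch, assuming the compiled module is
importable as yt.lagos.HDF5LightReader and that the file stores such a
reference:

    from yt.lagos import HDF5LightReader
    arr = HDF5LightReader.ReadData(
        "data0010.cpu0000",                      # assumed HDF5 file
        "/Grid00000042/particle_mass",           # dataset to read
        "/Grid00000042/AddressParticleType02")   # region reference
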
Modified: trunk/yt/lagos/OutputTypes.py
==============================================================================
--- trunk/yt/lagos/OutputTypes.py	(original)
+++ trunk/yt/lagos/OutputTypes.py	Mon Oct 19 18:15:42 2009
@@ -224,36 +224,9 @@
         """
         if self.parameters.has_key(parameter):
             return self.parameters[parameter]
-
-        # Let's read the file
-        self.parameters["CurrentTimeIdentifier"] = \
-            int(os.stat(self.parameter_filename)[ST_CTIME])
-        lines = open(self.parameter_filename).readlines()
-        for lineI, line in enumerate(lines):
-            if line.find("#") >= 1: # Keep the commented lines
-                line=line[:line.find("#")]
-            line=line.strip().rstrip()
-            if len(line) < 2:
-                continue
-            try:
-                param, vals = map(strip,map(rstrip,line.split("=")))
-            except ValueError:
-                mylog.error("ValueError: '%s'", line)
-            if parameter == param:
-                if type is None:
-                    t = vals.split()
-                else:
-                    t = map(type, vals.split())
-                if len(t) == 1:
-                    self.parameters[param] = t[0]
-                else:
-                    self.parameters[param] = t
-                if param.endswith("Units") and not param.startswith("Temperature"):
-                    dataType = param[:-5]
-                    self.conversion_factors[dataType] = self.parameters[param]
-                return self.parameters[parameter]
-
-        return ""
+        for line in open(self.parameter_filename):
+            if line.startswith(parameter):
+                return line.split("=", 1)[1].strip()
+        return ""
 
     def _parse_parameter_file(self):
         """

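The simplified lookup returns the right-hand side of the first matching
line as a string; for a parameter file containing
"NumberOfParticleAttributes = 3", a call like the sketch below (method
name as used elsewhere in yt) yields "3":

    value = pf.get_parameter("NumberOfParticleAttributes")  # -> "3"
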
Modified: trunk/yt/lagos/RTIntegrator.pyx
==============================================================================
--- trunk/yt/lagos/RTIntegrator.pyx	(original)
+++ trunk/yt/lagos/RTIntegrator.pyx	Mon Oct 19 18:15:42 2009
@@ -26,15 +26,20 @@
 import numpy as np
 cimport numpy as np
 cimport cython
+from stdlib cimport malloc, free, abs
+
+cdef extern from "math.h":
+    double exp(double x)
+    float expf(float x)
 
 @cython.boundscheck(False)
-def Transfer3D(np.ndarray[np.float_t, ndim=2] i_s,
-               np.ndarray[np.float_t, ndim=3] o_s,
-               np.ndarray[np.float_t, ndim=3] e,
-               np.ndarray[np.float_t, ndim=3] a,
+def Transfer3D(np.ndarray[np.float64_t, ndim=3] i_s,
+               np.ndarray[np.float64_t, ndim=4] o_s,
+               np.ndarray[np.float64_t, ndim=4] e,
+               np.ndarray[np.float64_t, ndim=4] a,
                int imin, int imax, int jmin, int jmax,
                int kmin, int kmax, int istride, int jstride,
-               float dx):
+               np.float64_t dx):
     """
     This function accepts an incoming slab (*i_s*), a buffer
     for an outgoing set of values at every point in the grid (*o_s*),
@@ -46,16 +51,64 @@
     cdef int i, ii
     cdef int j, jj
     cdef int k, kk
-    cdef float temp
+    cdef int n, nn
+    nn = o_s.shape[3] # This might be slow
+    cdef np.float64_t *temp = <np.float64_t *>malloc(sizeof(np.float64_t) * nn)
     for i in range((imax-imin)*istride):
         ii = i + imin*istride
         for j in range((jmax-jmin)*jstride):
             jj = j + jmin*jstride
-            temp = i_s[ii,jj]
+            # Not sure about the ordering of the loops here
+            for n in range(nn):
+                temp[n] = i_s[ii,jj,n] 
             for k in range(kmax-kmin):
-                o_s[i,j,k] = temp + dx*(e[i,j,k] - temp*a[i,j,k])
-                temp = o_s[i,j,k]
-            i_s[ii,jj] = temp
+                kk = k + kmin # there is no kstride; the k axis is traversed directly
+                for n in range(nn):
+                    o_s[i,j,k,n] = temp[n] + dx*(e[i,j,k,n] - temp[n]*a[i,j,k,n])
+                    temp[n] = o_s[i,j,k,n]
+            for n in range(nn):
+                i_s[ii,jj,n] = temp[n]
+    free(temp)
+
+@cython.boundscheck(False)
+def TransferShells(np.ndarray[np.float64_t, ndim=3] i_s,
+                   np.ndarray[np.float64_t, ndim=3] data,
+                   np.ndarray[np.float64_t, ndim=2] shells):
+    """
+    This function accepts an incoming slab (*i_s*), a buffer of *data*,
+    and a list of shells specified as [ (value, tolerance, r, g, b, a), ... ].
+    """
+    cdef int i, ii
+    cdef int j, jj
+    cdef int k, kk
+    cdef int n, nn
+    cdef np.float64_t dist
+    ii = data.shape[0]
+    jj = data.shape[1]
+    kk = data.shape[2]
+    nn = shells.shape[0]
+    cdef float rgba[4]
+    cdef float alpha
+    for i in range(ii):
+        for j in range(jj):
+            # Not sure about the ordering of the loops here
+            for k in range(kk):
+                for n in range(nn):
+                    dist = shells[n, 0] - data[i,j,k]
+                    if dist < 0: dist *= -1.0
+                    if dist < shells[n,1]:
+                        dist = exp(-dist/8.0)
+                        rgba[0] = shells[n,2]
+                        rgba[1] = shells[n,3]
+                        rgba[2] = shells[n,4]
+                        rgba[3] = shells[n,5]
+                        alpha = i_s[i,j,3]
+                        dist *= dist # This might improve appearance
+                        i_s[i,j,0] += (1.0 - alpha)*rgba[0]*dist*rgba[3]
+                        i_s[i,j,1] += (1.0 - alpha)*rgba[1]*dist*rgba[3]
+                        i_s[i,j,2] += (1.0 - alpha)*rgba[2]*dist*rgba[3]
+                        i_s[i,j,3] += (1.0 - alpha)*rgba[3]*dist*rgba[3]
+                        break
 
 @cython.boundscheck(False)
 def Transfer1D(float i_s,
@@ -118,7 +171,6 @@
     if not (0 <= intersect_t <= 1): return
     # Now get the indices of the intersection
     intersect = u + intersect_t * v
-    cdef int ncells = 0
     for i in range(3):
         cur_ind[i] = np.floor((intersect[i] + 1e-8*dx[i] - left_edge[i])/dx[i])
         tmax[i] = (((cur_ind[i]+step[i])*dx[i])+left_edge[i]-u[i])/v[i]
@@ -126,7 +178,8 @@
             cur_ind[i] = grid_mask.shape[i] - 1
         if step[i] > 0: tmax[i] = (((cur_ind[i]+1)*dx[i])+left_edge[i]-u[i])/v[i]
         if step[i] < 0: tmax[i] = (((cur_ind[i]+0)*dx[i])+left_edge[i]-u[i])/v[i]
-        tdelta[i] = np.abs((dx[i]/v[i]))
+        tdelta[i] = (dx[i]/v[i])
+        if tdelta[i] < 0: tdelta[i] *= -1
     # The variable intersect contains the point we first pierce the grid
     enter_t = intersect_t
     while 1:
@@ -141,7 +194,6 @@
         if (tmax[0] > 1.0) and (tmax[1] > 1.0) and (tmax[2] > 1.0):
             grid_dt[cur_ind[0], cur_ind[1], cur_ind[2]] = 1.0 - enter_t
             break
-        ncells += 1
         if tmax[0] < tmax[1]:
             if tmax[0] < tmax[2]:
                 grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = enter_t
@@ -169,3 +221,154 @@
                 tmax[2] += tdelta[2]
                 cur_ind[2] += step[2]
     return
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def PlaneVoxelIntegration(np.ndarray[np.float64_t, ndim=1] left_edge,
+                          np.ndarray[np.float64_t, ndim=1] right_edge,
+                          np.ndarray[np.float64_t, ndim=1] dx,
+                          np.ndarray[np.float64_t, ndim=2] ug,
+                          np.ndarray[np.float64_t, ndim=1] v,
+                          np.ndarray[np.float64_t, ndim=2] image,
+                          np.ndarray[np.float64_t, ndim=3] data,
+                          np.ndarray[np.float64_t, ndim=2] shells):
+    # We're roughly following Amanatides & Woo on a ray-by-ray basis
+    # Note that for now it's just shells, but this can and should be
+    # generalized to transfer functions
+    cdef int i, x, y, vi
+    intersect_t = 1
+    dt_tolerance = 1e-6
+    cdef int nv = ug.shape[0]
+    cdef int nshells = shells.shape[0]
+    cdef np.ndarray[np.float64_t, ndim=1] u = np.empty((3,), dtype=np.float64)
+    # Copy things into temporary location for passing between functions
+    for vi in range(nv):
+        for i in range(3): u[i] = ug[vi, i]
+        integrate_ray(u, v, left_edge, right_edge, dx, 
+                      nshells, vi, data, shells, image)
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def integrate_ray(np.ndarray[np.float64_t, ndim=1] u,
+                  np.ndarray[np.float64_t, ndim=1] v,
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  np.ndarray[np.float64_t, ndim=1] dx,
+                  int nshells, int ind,
+                  np.ndarray[np.float64_t, ndim=3] data,
+                  np.ndarray[np.float64_t, ndim=2] shells,
+                  np.ndarray[np.float64_t, ndim=2] image):
+    cdef int step[3], x, y, i, n
+    cdef np.float64_t intersect_t = 1
+    cdef np.float64_t dt_tolerance = 1e-6
+    cdef np.float64_t tl, tr, enter_t, exit_t
+    cdef np.int64_t cur_ind[3]
+    cdef np.float64_t tdelta[3]
+    cdef np.float64_t tmax[3]
+    cdef np.float64_t intersect[3]
+    cdef np.float64_t dt, dv
+    cdef np.float64_t dist, alpha
+    cdef np.float64_t one = 1.0
+    cdef int dims[3]
+    cdef np.float64_t rgba[4], temp_x, temp_y
+    for i in range(3):
+        # As long as we're iterating, set some other stuff, too
+        dims[i] = data.shape[i]
+        if(v[i] < 0): step[i] = -1
+        else: step[i] = 1
+        x = (i+1)%3
+        y = (i+2)%3
+        tl = (left_edge[i] - u[i])/v[i]
+        tr = (right_edge[i] - u[i])/v[i]
+        temp_x = (u[x] + tl*v[x])
+        temp_y = (u[y] + tl*v[y])
+        if (left_edge[x] <= temp_x) and (temp_x <= right_edge[x]) and \
+           (left_edge[y] <= temp_y) and (temp_y <= right_edge[y]) and \
+           (0.0 <= tl) and (tl < intersect_t):
+            intersect_t = tl
+        temp_x = (u[x] + tr*v[x])
+        temp_y = (u[y] + tr*v[y])
+        if (left_edge[x] <= temp_x) and (temp_x <= right_edge[x]) and \
+           (left_edge[y] <= temp_y) and (temp_y <= right_edge[y]) and \
+           (0.0 <= tr) and (tr < intersect_t):
+            intersect_t = tr
+    # if fully enclosed
+    if (left_edge[0] <= u[0] <= right_edge[0]) and \
+       (left_edge[1] <= u[1] <= right_edge[1]) and \
+       (left_edge[2] <= u[2] <= right_edge[2]):
+        intersect_t = 0.0
+    if not (0 <= intersect_t <= 1):
+        #print "Returning: intersect_t ==", intersect_t
+        return
+    # Now get the indices of the intersection
+    for i in range(3): intersect[i] = u[i] + intersect_t * v[i]
+    for i in range(3):
+        cur_ind[i] = np.floor((intersect[i] + 1e-8*dx[i] - left_edge[i])/dx[i])
+        tmax[i] = (((cur_ind[i]+step[i])*dx[i])+left_edge[i]-u[i])/v[i]
+        if cur_ind[i] == dims[i] and step[i] < 0:
+            cur_ind[i] = dims[i] - 1
+        if step[i] > 0: tmax[i] = (((cur_ind[i]+1)*dx[i])+left_edge[i]-u[i])/v[i]
+        if step[i] < 0: tmax[i] = (((cur_ind[i]+0)*dx[i])+left_edge[i]-u[i])/v[i]
+        tdelta[i] = (dx[i]/v[i])
+        if tdelta[i] < 0: tdelta[i] *= -1
+    # The variable intersect contains the point we first pierce the grid
+    enter_t = intersect_t
+    if (not (0 <= cur_ind[0] < dims[0])) or \
+       (not (0 <= cur_ind[1] < dims[1])) or \
+       (not (0 <= cur_ind[2] < dims[2])):
+        #print "Returning: cur_ind", cur_ind[0], cur_ind[1], cur_ind[2]
+        #print "  dims:     ", dims[0], dims[1], dims[2]
+        #print "  intersect:",  intersect[0], intersect[1], intersect[2]
+        #print "  intersect:", intersect_t
+        #print "  u        :", u[0], u[1], u[2]
+        #
+        return
+    #print cur_ind[0], dims[0], cur_ind[1], dims[1], cur_ind[2], dims[2]
+    dv = data[cur_ind[0], cur_ind[1], cur_ind[2]]
+    #dt = 1e300
+    while 1:
+        if image[ind,3] >= 1.0: break
+        if (not (0 <= cur_ind[0] < dims[0])) or \
+           (not (0 <= cur_ind[1] < dims[1])) or \
+           (not (0 <= cur_ind[2] < dims[2])):
+            break
+        # Do our transfer here
+        for n in range(nshells):
+            dist = shells[n, 0] - dv
+            if dist < 0: dist *= -1.0
+            if dist < shells[n,1]:
+                dist = exp(-dist/8.0)
+                alpha = (1.0 - shells[n,5])*shells[n,5]#*dt
+                image[ind,0] += alpha*shells[n,2]*dist
+                image[ind,1] += alpha*shells[n,3]*dist
+                image[ind,2] += alpha*shells[n,4]*dist
+                image[ind,3] += alpha*shells[n,5]*dist
+                #image[ind,i] += rgba[i]*dist*rgba[3]/dt
+                #print rgba[i], image[ind,i], dist, dt
+                break
+        if (tmax[0] > 1.0) and (tmax[1] > 1.0) and (tmax[2] > 1.0):
+            dt = 1.0 - enter_t
+            break
+        if tmax[0] < tmax[1]:
+            if tmax[0] < tmax[2]:
+                dt = tmax[0] - enter_t
+                enter_t = tmax[0]
+                tmax[0] += tdelta[0]
+                cur_ind[0] += step[0]
+            else:
+                dt = tmax[2] - enter_t
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+                cur_ind[2] += step[2]
+        else:
+            if tmax[1] < tmax[2]:
+                dt = tmax[1] - enter_t
+                enter_t = tmax[1]
+                tmax[1] += tdelta[1]
+                cur_ind[1] += step[1]
+            else:
+                dt = tmax[2] - enter_t
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+                cur_ind[2] += step[2]
+        dv = data[cur_ind[0], cur_ind[1], cur_ind[2]]

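A self-contained sketch of driving the new TransferShells integrator; the
field values and shell rows are made up, and the import assumes the
compiled module is available as yt.lagos.RTIntegrator:

    import numpy as na
    from yt.lagos import RTIntegrator

    data = na.random.random((32, 32, 32))           # stand-in 3D field
    shells = na.array([                             # (value, tol, r, g, b, a)
        [0.25, 0.05, 1.0, 0.0, 0.0, 0.1],
        [0.50, 0.05, 0.0, 1.0, 0.0, 0.3],
        [0.75, 0.05, 0.0, 0.0, 1.0, 0.6],
    ], dtype='float64')
    image = na.zeros((32, 32, 4), dtype='float64')  # running RGBA buffer
    RTIntegrator.TransferShells(image, data, shells)
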
Modified: trunk/yt/lagos/UniversalFields.py
==============================================================================
--- trunk/yt/lagos/UniversalFields.py	(original)
+++ trunk/yt/lagos/UniversalFields.py	Mon Oct 19 18:15:42 2009
@@ -130,23 +130,29 @@
 add_field("SoundSpeed", function=_SoundSpeed,
           units=r"\rm{cm}/\rm{s}")
 
-def particle_func(p_field, dtype='float64'):
+def particle_func(p_field, dtype='float64', ptype=None):
+    dspace = None
+    if ptype is not None:
+        dspace = "AddressParticleType%02i" % ptype
     def _Particles(field, data):
         if not data.NumberOfParticles > 0:
             return na.array([], dtype=dtype)
         try:
-            return data._read_data(p_field).astype(dtype)
+            return data._read_data(p_field, dspace=dspace).astype(dtype)
         except data._read_exception:
             pass
         # This is bad.  But it's the best idea I have right now.
-        return data._read_data(p_field.replace("_"," ")).astype(dtype)
+        return data._read_data(p_field.replace("_"," "), dspace=dspace).astype(dtype)
     return _Particles
-for pf in ["type", "mass"] + \
-          ["position_%s" % ax for ax in 'xyz']:
-    pfunc = particle_func("particle_%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
+for ptype in [None] + range(10):
+    for pf in ["type", "mass"] + \
+              ["position_%s" % ax for ax in 'xyz']:
+        pname = "particle_%s" % pf
+        if ptype is not None: pname = "PT%02i_%s" % (ptype, pname)
+        pfunc = particle_func("particle_%s" % (pf), ptype=ptype)
+        add_field(pname, function=pfunc,
+                  validators = [ValidateSpatial(0)],
+                  particle_type=True)
 
 def _convRetainInt(data):
     return 1

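The loop above registers both the plain fields (particle_mass,
particle_type, particle_position_x/y/z) and per-type variants named
PT00_... through PT09_.... An access sketch; the type index 2 is an
assumption about the dataset:

    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    star_mass = sp["PT02_particle_mass"]   # particles with type index 2
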
Modified: trunk/yt/lagos/setup.py
==============================================================================
--- trunk/yt/lagos/setup.py	(original)
+++ trunk/yt/lagos/setup.py	Mon Oct 19 18:15:42 2009
@@ -20,7 +20,7 @@
     config.make_config_py() # installs __config__.py
     config.make_svn_version_py()
     config.add_extension("PointCombine", "yt/lagos/PointCombine.c", libraries=["m"])
-    config.add_extension("RTIntegrator", "yt/lagos/RTIntegrator.c")
+    config.add_extension("RTIntegrator", "yt/lagos/RTIntegrator.c", libraries=["m"])
     config.add_extension("Interpolators", "yt/lagos/Interpolators.c")
     config.add_extension("PointsInVolume", "yt/lagos/PointsInVolume.c")
     #config.add_extension("DepthFirstOctree", "yt/lagos/DepthFirstOctree.c")

Modified: trunk/yt/raven/Callbacks.py
==============================================================================
--- trunk/yt/raven/Callbacks.py	(original)
+++ trunk/yt/raven/Callbacks.py	Mon Oct 19 18:15:42 2009
@@ -228,7 +228,7 @@
             if verts.size == 0: continue
             edgecolors = (0.0,0.0,0.0,self.alpha)
             grid_collection = matplotlib.collections.PolyCollection(
-                    verts, facecolors=(0.0,0.0,0.0,0.0),
+                    verts, facecolors="none",
                            edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)

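For context, facecolors="none" is matplotlib's idiom for unfilled
polygons, which keeps the grid outlines from occluding the underlying
plot. A standalone sketch (the vertices are made up):

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.collections

    fig, ax = plt.subplots()
    verts = [np.array([[0.1, 0.1], [0.4, 0.1], [0.4, 0.4], [0.1, 0.4]])]
    pc = matplotlib.collections.PolyCollection(
        verts, facecolors="none",            # unfilled, outline only
        edgecolors=(0.0, 0.0, 0.0, 0.5))
    ax.add_collection(pc)
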

