[yt-svn] commit/yt: 2 new changesets
Bitbucket
commits-noreply at bitbucket.org
Thu Oct 4 21:55:06 PDT 2012
2 new commits in yt:
https://bitbucket.org/yt_analysis/yt/changeset/2eee5fc43d54/
changeset: 2eee5fc43d54
branch: yt
user: ngoldbaum
date: 2012-10-05 06:54:02
summary: Fixing a bug in the point callback pointed out by Elizabeth Tasker.
affected #: 1 file
diff -r d2daf94661086662a9efe6139fe88557b71bbff3 -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -745,12 +745,13 @@
self.text_args = text_args
def __call__(self, plot):
-
-
+ if len(self.pos) == 3:
+ pos = (self.pos[x_dict[plot.data.axis]],
+ self.pos[y_dict[plot.data.axis]])
+ else: pos = self.pos
width,height = plot.image._A.shape
- x,y = self.convert_to_plot(plot, self.pos)
- x,y = x/width,y/height
-
+ x,y = self.convert_to_plot(plot, pos)
+
plot._axes.text(x, y, self.text, **self.text_args)
class MarkerAnnotateCallback(PlotCallback):
https://bitbucket.org/yt_analysis/yt/changeset/066a7b53b64d/
changeset: 066a7b53b64d
branch: yt
user: ngoldbaum
date: 2012-10-05 06:54:30
summary: Merging
affected #: 38 files
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -400,7 +400,7 @@
echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478 PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338 bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6 reason-js-20120623.zip' > reason-js-20120623.zip.sha512
echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
@@ -429,7 +429,7 @@
[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
-get_ytproject Python-2.7.2.tgz
+get_ytproject Python-2.7.3.tgz
get_ytproject numpy-1.6.1.tar.gz
get_ytproject matplotlib-1.1.0.tar.gz
get_ytproject mercurial-2.2.2.tar.gz
@@ -554,11 +554,11 @@
fi
fi
-if [ ! -e Python-2.7.2/done ]
+if [ ! -e Python-2.7.3/done ]
then
echo "Installing Python. This may take a while, but don't worry. YT loves you."
- [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
- cd Python-2.7.2
+ [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
+ cd Python-2.7.3
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -133,6 +133,7 @@
else:
exec(operation)
+ if self.children is None: return
for child in self.children:
child.pass_down(operation)
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,6 +210,8 @@
LE, RE = self.hierarchy.grid_left_edge[id,:], \
self.hierarchy.grid_right_edge[id,:]
self.dds = np.array((RE - LE) / self.ActiveDimensions)
+ if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+ if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@property
@@ -364,8 +366,10 @@
self._child_index_mask = None
#@time_execution
- def __fill_child_mask(self, child, mask, tofill):
+ def __fill_child_mask(self, child, mask, tofill, dlevel = 1):
rf = self.pf.refine_by
+ if dlevel != 1:
+ rf = rf**dlevel
gi, cgi = self.get_global_startindex(), child.get_global_startindex()
startIndex = np.maximum(0, cgi / rf - gi)
endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
@@ -386,7 +390,7 @@
self.__fill_child_mask(child, self._child_mask, 0)
if self.OverlappingSiblings is not None:
for sibling in self.OverlappingSiblings:
- self.__fill_child_mask(sibling, self._child_mask, 0)
+ self.__fill_child_mask(sibling, self._child_mask, 0, 0)
self._child_indices = (self._child_mask==0) # bool, possibly redundant
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -938,6 +938,25 @@
validators=[ValidateParameter("cp_%s_vec" % ax)
for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
+def _CuttingPlaneBx(field, data):
+ x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+ for ax in 'xyz']
+ b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+ return np.dot(x_vec, b_vec)
+add_field("CuttingPlaneBx",
+ function=_CuttingPlaneBx,
+ validators=[ValidateParameter("cp_%s_vec" % ax)
+ for ax in 'xyz'], units=r"\rm{Gauss}")
+def _CuttingPlaneBy(field, data):
+ x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+ for ax in 'xyz']
+ b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+ return np.dot(y_vec, b_vec)
+add_field("CuttingPlaneBy",
+ function=_CuttingPlaneBy,
+ validators=[ValidateParameter("cp_%s_vec" % ax)
+ for ax in 'xyz'], units=r"\rm{Gauss}")
+
def _MeanMolecularWeight(field,data):
return (data["Density"] / (mh *data["NumberDensity"]))
add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")
@@ -975,12 +994,37 @@
units of Gauss. If you use MKS, make sure to write your own
MagneticEnergy field to deal with non-unitary \mu_0.
"""
- return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/2.
+ return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/(8*np.pi)
add_field("MagneticEnergy",function=_MagneticEnergy,
- units=r"",
- validators = [ValidateDataField("Bx"),
- ValidateDataField("By"),
- ValidateDataField("Bz")])
+ units=r"\rm{ergs}\/\rm{cm}^{-3}",
+ display_name=r"\rm{Magnetic}\/\rm{Energy}")
+
+def _BMagnitude(field,data):
+ """This assumes that your front end has provided Bx, By, Bz in
+ units of Gauss. If you use MKS, make sure to write your own
+ BMagnitude field to deal with non-unitary \mu_0.
+ """
+ return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
+add_field("BMagnitude",
+ function=_BMagnitude,
+ display_name=r"|B|", units=r"\rm{Gauss}")
+
+def _PlasmaBeta(field,data):
+ """This assumes that your front end has provided Bx, By, Bz in
+ units of Gauss. If you use MKS, make sure to write your own
+ PlasmaBeta field to deal with non-unitary \mu_0.
+ """
+ return data['Pressure']/data['MagneticEnergy']
+add_field("PlasmaBeta",
+ function=_PlasmaBeta,
+ display_name=r"\rm{Plasma}\/\beta", units="")
+
+def _MagneticPressure(field,data):
+ return data['MagneticEnergy']
+add_field("MagneticPressure",
+ function=_MagneticPressure,
+ display_name=r"\rm{Magnetic}\/\rm{Energy}",
+ units="\rm{ergs}\/\rm{cm}^{-3}")
def _VorticitySquared(field, data):
mylog.debug("Generating vorticity on %s", data)
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+ SkeletonGrid, \
+ SkeletonHierarchy, \
+ SkeletonStaticOutput
+
+from .fields import \
+ SkeletonFieldInfo, \
+ add_flash_field
+
+from .io import \
+ IOHandlerSkeleton
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+ AMRGridPatch
+from yt.data_objects.hierarchy import \
+ AMRHierarchy
+from yt.data_objects.static_output import \
+ StaticOutput
+from yt.utilities.definitions import \
+ mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+ io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_flash_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+ FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+ _id_offset = 0
+ #__slots__ = ["_level_id", "stop_index"]
+ def __init__(self, id, hierarchy, level):
+ AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+ hierarchy = hierarchy)
+ self.Parent = None
+ self.Children = []
+ self.Level = level
+
+ def __repr__(self):
+ return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+ grid = SkeletonGrid
+ float_type = np.float64
+
+ def __init__(self, pf, data_style='skeleton'):
+ self.data_style = data_style
+ self.parameter_file = weakref.proxy(pf)
+ # for now, the hierarchy file is the parameter file!
+ self.hierarchy_filename = self.parameter_file.parameter_filename
+ self.directory = os.path.dirname(self.hierarchy_filename)
+ AMRHierarchy.__init__(self, pf, data_style)
+
+ def _initialize_data_storage(self):
+ pass
+
+ def _detect_fields(self):
+ # This needs to set a self.field_list that contains all the available,
+ # on-disk fields.
+ pass
+
+ def _count_grids(self):
+ # This needs to set self.num_grids
+ pass
+
+ def _parse_hierarchy(self):
+ # This needs to fill the following arrays, where N is self.num_grids:
+ # self.grid_left_edge (N, 3) <= float64
+ # self.grid_right_edge (N, 3) <= float64
+ # self.grid_dimensions (N, 3) <= int
+ # self.grid_particle_count (N, 1) <= int
+ # self.grid_levels (N, 1) <= int
+ # self.grids (N, 1) <= grid objects
+ #
+ pass
+
+ def _populate_grid_objects(self):
+ # For each grid, this must call:
+ # grid._prepare_grid()
+ # grid._setup_dx()
+ # This must also set:
+ # grid.Children <= list of child grids
+ # grid.Parent <= parent grid
+ # This is handled by the frontend because often the children must be
+ # identified.
+ pass
+
+class SkeletonStaticOutput(StaticOutput):
+ _hierarchy_class = SkeletonHierarchy
+ _fieldinfo_fallback = SkeletonFieldInfo
+ _fieldinfo_known = KnownSkeletonFields
+ _handle = None
+
+ def __init__(self, filename, data_style='skeleton',
+ storage_filename = None,
+ conversion_override = None):
+
+ if conversion_override is None: conversion_override = {}
+ self._conversion_override = conversion_override
+
+ StaticOutput.__init__(self, filename, data_style)
+ self.storage_filename = storage_filename
+
+ def _set_units(self):
+ # This needs to set up the dictionaries that convert from code units to
+ # CGS. The needed items are listed in the second entry:
+ # self.time_units <= sec_conversion
+ # self.conversion_factors <= mpc_conversion
+ # self.units <= On-disk fields
+ pass
+
+ def _parse_parameter_file(self):
+ # This needs to set up the following items:
+ #
+ # self.unique_identifier
+ # self.parameters <= full of code-specific items of use
+ # self.domain_left_edge <= array of float64
+ # self.domain_right_edge <= array of float64
+ # self.dimensionality <= int
+ # self.domain_dimensions <= array of int64
+ # self.current_time <= simulation time in code units
+ #
+ # We also set up cosmological information. Set these to zero if
+ # non-cosmological.
+ #
+ # self.cosmological_simulation <= int, 0 or 1
+ # self.current_redshift <= float
+ # self.omega_lambda <= float
+ # self.omega_matter <= float
+ # self.hubble_constant <= float
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ # This accepts a filename or a set of arguments and returns True or
+ # False depending on if the file is of the type requested.
+ return False
+
+
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+ FieldInfoContainer, \
+ NullFunc, \
+ TranslationFunc, \
+ FieldInfo, \
+ ValidateParameter, \
+ ValidateDataField, \
+ ValidateProperty, \
+ ValidateSpatial, \
+ ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+ kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt. This
+# construct shows how to do that. Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+ "y-velocity": "vely",
+ "z-velocity": "velz",
+ "Density": "dens",
+ "Temperature": "temp",
+ "Pressure" : "pres",
+ "Grav_Potential" : "gpot",
+ "particle_position_x" : "particle_posx",
+ "particle_position_y" : "particle_posy",
+ "particle_position_z" : "particle_posz",
+ "particle_velocity_x" : "particle_velx",
+ "particle_velocity_y" : "particle_vely",
+ "particle_velocity_z" : "particle_velz",
+ "particle_index" : "particle_tag",
+ "Electron_Fraction" : "elec",
+ "HI_Fraction" : "h ",
+ "HD_Fraction" : "hd ",
+ "HeI_Fraction": "hel ",
+ "HeII_Fraction": "hep ",
+ "HeIII_Fraction": "hepp",
+ "HM_Fraction": "hmin",
+ "HII_Fraction": "hp ",
+ "H2I_Fraction": "htwo",
+ "H2II_Fraction": "htwp",
+ "DI_Fraction": "deut",
+ "DII_Fraction": "dplu",
+ "ParticleMass": "particle_mass",
+ "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+ if v not in KnownSkeletonFields:
+ pfield = v.startswith("particle")
+ add_skeleton_field(v, function=NullFunc, take_log=False,
+ validators = [ValidateDataField(v)],
+ particle_type = pfield)
+ if f.endswith("_Fraction") :
+ dname = "%s\/Fraction" % f.split("_")[0]
+ else :
+ dname = f
+ ff = KnownSkeletonFields[v]
+ pfield = f.startswith("particle")
+ add_field(f, TranslationFunc(v),
+ take_log=KnownSkeletonFields[v].take_log,
+ units = ff._units, display_name=dname,
+ particle_type = pfield)
+
+# Here's an example of adding a new field:
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+ convert_function=_get_convert("dens"),
+ units=r"\rm{g}/\rm{cm}^3")
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+ BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+ _particle_reader = False
+ _data_style = "skeleton"
+
+ def _read_data_set(self, grid, field):
+ # This must return the array, of size/shape grid.ActiveDimensions, that
+ # corresponds to 'field'.
+ pass
+
+ def _read_data_slice(self, grid, field, axis, coord):
+ # If this is not implemented, the IO handler will just slice a
+ # _read_data_set item.
+ pass
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('skeleton', parent_package, top_path)
+ config.make_config_py() # installs __config__.py
+ #config.make_svn_version_py()
+ return config
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -101,18 +101,11 @@
self.Parent = None
def _setup_dx(self):
- # So first we figure out what the index is. We don't assume
- # that dx=dy=dz , at least here. We probably do elsewhere.
- id = self.id - self._id_offset
- if self.Parent is not None:
- self.dds = self.Parent[0].dds / self.pf.refine_by
- else:
- LE, RE = self.hierarchy.grid_left_edge[id,:], \
- self.hierarchy.grid_right_edge[id,:]
- self.dds = np.array((RE-LE)/self.ActiveDimensions)
-
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
+ # has already been read in and stored in hierarchy
+ dx = self.hierarchy.grid_dxs[self.index][0]
+ dy = self.hierarchy.grid_dys[self.index][0]
+ dz = self.hierarchy.grid_dzs[self.index][0]
+ self.dds = np.array([dx, dy, dz])
self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def __repr__(self):
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -89,17 +89,8 @@
return self.start_index
def _setup_dx(self):
- # So first we figure out what the index is. We don't assume
- # that dx=dy=dz , at least here. We probably do elsewhere.
- id = self.id - self._id_offset
- if len(self.Parent) > 0:
- self.dds = self.Parent[0].dds / self.pf.refine_by
- else:
- LE, RE = self.hierarchy.grid_left_edge[id,:], \
- self.hierarchy.grid_right_edge[id,:]
- self.dds = np.array((RE-LE)/self.ActiveDimensions)
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
+ # has already been read in and stored in hierarchy
+ self.dds = self.hierarchy.dds_list[self.Level]
self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
class ChomboHierarchy(AMRHierarchy):
@@ -176,11 +167,13 @@
# 'Chombo_global'
levels = f.keys()[1:]
grids = []
+ self.dds_list = []
i = 0
for lev in levels:
level_number = int(re.match('level_(\d+)',lev).groups()[0])
boxes = f[lev]['boxes'].value
dx = f[lev].attrs['dx']
+ self.dds_list.append(dx * np.ones(3))
for level_id, box in enumerate(boxes):
si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -98,6 +98,21 @@
add_field("Density",function=_Density, take_log=True,
units=r'\rm{g}/\rm{cm^3}')
+def _Bx(field,data):
+ return data["X-magnfield"]
+add_field("Bx", function=_Bx, take_log=False,
+ units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(field,data):
+ return data["Y-magnfield"]
+add_field("By", function=_By, take_log=False,
+ units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(field,data):
+ return data["Z-magnfield"]
+add_field("Bz", function=_Bz, take_log=False,
+ units=r"\rm{Gauss}", display_name=r"B_z")
+
def _MagneticEnergy(field,data):
return (data["X-magnfield"]**2 +
data["Y-magnfield"]**2 +
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -23,6 +23,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+import numpy as np
from yt.data_objects.field_info_container import \
FieldInfoContainer, \
NullFunc, \
@@ -97,7 +98,10 @@
if fn1.endswith("_Fraction"):
add_field(fn1.split("_")[0] + "_Density",
function=_get_density(fn1), take_log=True,
- display_name="%s\/Density" % fn1.split("_")[0])
+ display_name="%s\/Density" % fn1.split("_")[0],
+ units = r"\rm{g}/\rm{cm}^3",
+ projected_units = r"\rm{g}/\rm{cm}^2",
+ )
def _get_convert(fname):
def _conv(data):
@@ -106,7 +110,8 @@
add_flash_field("dens", function=NullFunc, take_log=True,
convert_function=_get_convert("dens"),
- units=r"\rm{g}/\rm{cm}^3")
+ units=r"\rm{g}/\rm{cm}^3",
+ projected_units = r"\rm{g}/\rm{cm}^2"),
add_flash_field("velx", function=NullFunc, take_log=False,
convert_function=_get_convert("velx"),
units=r"\rm{cm}/\rm{s}")
@@ -203,6 +208,7 @@
add_field(f, TranslationFunc(v),
take_log=KnownFLASHFields[v].take_log,
units = ff._units, display_name=dname,
+ projected_units = ff._projected_units,
particle_type = pfield)
def _convertParticleMassMsun(data):
@@ -254,3 +260,43 @@
add_field("GasEnergy", function=_GasEnergy,
units=r"\rm{ergs}/\rm{g}")
+
+# See http://flash.uchicago.edu/pipermail/flash-users/2012-October/001180.html
+# along with the attachment to that e-mail for details
+def GetMagRescalingFactor(pf):
+ if pf['unitsystem'].lower() == "cgs":
+ factor = 1
+ if pf['unitsystem'].lower() == "si":
+ factor = np.sqrt(4*np.pi/1e7)
+ if pf['unitsystem'].lower() == "none":
+ factor = np.sqrt(4*np.pi)
+ else:
+ raise RuntimeError("Runtime parameter unitsystem with"
+ "value %s is unrecognized" % pf['unitsystem'])
+ return factor
+
+def _Bx(fields, data):
+ factor = GetMagRescalingFactor(data.pf)
+ return data['magx']*factor
+add_field("Bx", function=_Bx, take_log=False,
+ units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+ factor = GetMagRescalingFactor(data.pf)
+ return data['magy']*factor
+add_field("By", function=_By, take_log=False,
+ units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+ factor = GetMagRescalingFactor(data.pf)
+ return data['magz']*factor
+add_field("Bz", function=_Bz, take_log=False,
+ units=r"\rm{Gauss}", display_name=r"B_z")
+
+def _DivB(fields, data):
+ factor = GetMagRescalingFactor(data.pf)
+ return data['divb']*factor
+add_field("DivB", function=_DivB, take_log=False,
+ units=r"\rm{Gauss}\/\rm{cm}^{-1}")
+
+
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -37,6 +37,8 @@
AMRHierarchy
from yt.data_objects.static_output import \
StaticOutput
+from yt.utilities.lib import \
+ get_box_grids_level
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
@@ -133,14 +135,25 @@
del levels, glis, gdims
def _populate_grid_objects(self):
- for g in self.grids:
+ mask = np.empty(self.grids.size, dtype='int32')
+ for gi, g in enumerate(self.grids):
g._prepare_grid()
g._setup_dx()
- for g in self.grids:
+ for gi, g in enumerate(self.grids):
g.Children = self._get_grid_children(g)
for g1 in g.Children:
g1.Parent.append(g)
+ get_box_grids_level(self.grid_left_edge[gi,:],
+ self.grid_right_edge[gi,:],
+ self.grid_levels[gi],
+ self.grid_left_edge, self.grid_right_edge,
+ self.grid_levels, mask)
+ m = mask.astype("bool")
+ m[gi] = False
+ siblings = self.grids[gi:][m[gi:]]
+ if len(siblings) > 0:
+ g.OverlappingSiblings = siblings.tolist()
self.max_level = self.grid_levels.max()
def _setup_derived_fields(self):
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -102,17 +102,11 @@
self.Parent = None
def _setup_dx(self):
- # So first we figure out what the index is. We don't assume
- # that dx=dy=dz , at least here. We probably do elsewhere.
- id = self.id - self._id_offset
- if self.Parent is not None:
- self.dds = self.Parent[0].dds / self.pf.refine_by
- else:
- LE, RE = self.hierarchy.grid_left_edge[id,:], \
- self.hierarchy.grid_right_edge[id,:]
- self.dds = np.array((RE-LE)/self.ActiveDimensions)
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
+ # has already been read in and stored in hierarchy
+ dx = self.hierarchy.grid_dxs[self.index][0]
+ dy = self.hierarchy.grid_dys[self.index][0]
+ dz = self.hierarchy.grid_dzs[self.index][0]
+ self.dds = np.array([dx, dy, dz])
self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def __repr__(self):
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -100,18 +100,11 @@
self.Parent = None
def _setup_dx(self):
- # So first we figure out what the index is. We don't assume that
- # dx=dy=dz here.
- id = self.id - self._id_offset
- if self.Parent is not None:
- self.dds = self.Parent[0].dds / self.pf.refine_by
- else:
- LE, RE = self.hierarchy.grid_left_edge[id,:], \
- self.hierarchy.grid_right_edge[id,:]
- self.dds = np.array((RE - LE) / self.ActiveDimensions)
-
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
+ # has already been read in and stored in hierarchy
+ dx = self.hierarchy.grid_dxs[self.index][0]
+ dy = self.hierarchy.grid_dys[self.index][0]
+ dz = self.hierarchy.grid_dzs[self.index][0]
+ self.dds = np.array([dx, dy, dz])
self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def __repr__(self):
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -99,17 +99,11 @@
self.Parent = None
def _setup_dx(self):
- # So first we figure out what the index is. We don't assume
- # that dx=dy=dz , at least here. We probably do elsewhere.
- id = self.id - self._id_offset
- if self.Parent is not None:
- self.dds = self.Parent[0].dds / self.pf.refine_by
- else:
- LE, RE = self.hierarchy.grid_left_edge[id,:], \
- self.hierarchy.grid_right_edge[id,:]
- self.dds = np.array((RE-LE)/self.ActiveDimensions)
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
+ # has already been read in and stored in hierarchy
+ dx = self.hierarchy.grid_dxs[self.index][0]
+ dy = self.hierarchy.grid_dys[self.index][0]
+ dz = self.hierarchy.grid_dzs[self.index][0]
+ self.dds = np.array([dx, dy, dz])
self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def __repr__(self):
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -122,7 +122,7 @@
get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
callback_registry, write_bitmap, write_image, annotate_image, \
apply_colormap, scale_image, write_projection, write_fits, \
- SlicePlot, OffAxisSlicePlot, ProjectionPlot
+ SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
from yt.visualization.volume_rendering.api import \
ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/testing.py
--- /dev/null
+++ b/yt/testing.py
@@ -0,0 +1,143 @@
+"""Provides utility and helper functions for testing in yt.
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 Anthony Scopatz. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from yt.funcs import *
+from numpy.testing import assert_array_equal
+
+def amrspace(extent, levels=7, cells=8):
+ """Creates two numpy arrays representing the left and right bounds of
+ an AMR grid as well as an array for the AMR level of each cell.
+
+ Parameters
+ ----------
+ extent : array-like
+        This is a sequence of length 2*ndims that is the bounds of each dimension.
+ For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
+ A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
+ levels : int or sequence of ints, optional
+ This is the number of AMR refinement levels. If given as a sequence (of
+ length ndims), then each dimension will be refined down to this level.
+ All values in this array must be the same or zero. A zero valued dimension
+        indicates that this dim should not be refined. Taking the 3D cylindrical
+        example above, if we don't want to refine theta but want r and z at 5 we
+        would set levels=(5, 5, 0).
+ cells : int, optional
+ This is the number of cells per refinement level.
+
+ Returns
+ -------
+ left : float ndarray, shape=(npoints, ndims)
+ The left AMR grid points.
+ right : float ndarray, shape=(npoints, ndims)
+ The right AMR grid points.
+ level : int ndarray, shape=(npoints,)
+ The AMR level for each point.
+
+ Examples
+ --------
+ >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
+ >>> print l
+ [[ 0. 1. 0. ]
+ [ 0.25 1. 0. ]
+ [ 0. 1.125 0. ]
+ [ 0.25 1.125 0. ]
+ [ 0.5 1. 0. ]
+ [ 0. 1.25 0. ]
+ [ 0.5 1.25 0. ]
+ [ 1. 1. 0. ]
+ [ 0. 1.5 0. ]
+ [ 1. 1.5 0. ]]
+
+ """
+ extent = np.asarray(extent, dtype='f8')
+ dextent = extent[1::2] - extent[::2]
+ ndims = len(dextent)
+
+ if isinstance(levels, int):
+ minlvl = maxlvl = levels
+ levels = np.array([levels]*ndims, dtype='int32')
+ else:
+ levels = np.asarray(levels, dtype='int32')
+ minlvl = levels.min()
+ maxlvl = levels.max()
+ if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
+ raise ValueError("all levels must have the same value or zero.")
+ dims_zero = (levels == 0)
+ dims_nonzero = ~dims_zero
+ ndims_nonzero = dims_nonzero.sum()
+
+ npoints = (cells**ndims_nonzero - 1)*maxlvl + 1
+ left = np.empty((npoints, ndims), dtype='float64')
+ right = np.empty((npoints, ndims), dtype='float64')
+ level = np.empty(npoints, dtype='int32')
+
+ # fill zero dims
+ left[:,dims_zero] = extent[::2][dims_zero]
+ right[:,dims_zero] = extent[1::2][dims_zero]
+
+ # fill non-zero dims
+ dcell = 1.0 / cells
+ left_slice = tuple([slice(extent[2*n], extent[2*n+1], extent[2*n+1]) if \
+ dims_zero[n] else slice(0.0,1.0,dcell) for n in range(ndims)])
+ right_slice = tuple([slice(extent[2*n+1], extent[2*n], -extent[2*n+1]) if \
+ dims_zero[n] else slice(dcell,1.0+dcell,dcell) for n in range(ndims)])
+ left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
+ lng_zero = left_norm_grid[:,dims_zero]
+ lng_nonzero = left_norm_grid[:,dims_nonzero]
+
+ right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
+ rng_zero = right_norm_grid[:,dims_zero]
+ rng_nonzero = right_norm_grid[:,dims_nonzero]
+
+ level[0] = maxlvl
+ left[0,:] = extent[::2]
+ right[0,dims_zero] = extent[1::2][dims_zero]
+ right[0,dims_nonzero] = (dcell**maxlvl)*dextent[dims_nonzero] + extent[::2][dims_nonzero]
+ for i, lvl in enumerate(range(maxlvl, 0, -1)):
+ start = (cells**ndims_nonzero - 1)*i + 1
+ stop = (cells**ndims_nonzero - 1)*(i+1) + 1
+ dsize = dcell**(lvl-1) * dextent[dims_nonzero]
+ level[start:stop] = lvl
+ left[start:stop,dims_zero] = lng_zero
+ left[start:stop,dims_nonzero] = lng_nonzero*dsize + extent[::2][dims_nonzero]
+ right[start:stop,dims_zero] = rng_zero
+ right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
+
+ return left, right, level
+
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+ from yt.frontends.stream.api import load_uniform_grid
+ if not iterable(ndims):
+ ndims = [ndims, ndims, ndims]
+ else:
+ assert(len(ndims) == 3)
+ if negative:
+ offset = 0.5
+ else:
+ offset = 0.0
+ data = dict((field, (np.random.random(ndims) - offset) * peak_value)
+ for field in fields)
+ ug = load_uniform_grid(data, ndims, 1.0)
+ return ug
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -998,11 +998,11 @@
anprocs = 2**par_tree_depth
volume_partitioned = 0.0
- pbar = get_pbar("Building kd-Tree",
- np.prod(self.domain_right_edge-self.domain_left_edge))
+ total_vol = np.prod(self.domain_right_edge-self.domain_left_edge)
+ pbar = get_pbar("Building kd-Tree", total_vol)
while current_node is not None:
- pbar.update(volume_partitioned)
+ pbar.update(min(volume_partitioned, total_vol))
# If we don't have any grids, that means we are revisiting
# a dividing node, and there is nothing to be done.
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1188,6 +1188,40 @@
import yt.utilities.lodgeit as lo
lo.main( None, download=args.number )
+class YTNotebookUploadCmd(YTCommand):
+ args = (dict(short="file", type=str),)
+ description = \
+ """
+ Upload an IPython notebook to hub.yt-project.org.
+ """
+
+ name = "upload_notebook"
+ def __call__(self, args):
+ filename = args.file
+ if not os.path.isfile(filename):
+ raise IOError(filename)
+ if not filename.endswith(".ipynb"):
+ print "File must be an IPython notebook!"
+ return 1
+ import json
+ try:
+ t = json.loads(open(filename).read())['metadata']['name']
+ except (ValueError, KeyError):
+ print "File does not appear to be an IPython notebook."
+ from yt.utilities.minimal_representation import MinimalNotebook
+ mn = MinimalNotebook(filename, t)
+ rv = mn.upload()
+ print "Upload successful!"
+ print
+ print "To access your raw notebook go here:"
+ print
+ print " %s" % (rv['url'])
+ print
+ print "To view your notebook go here:"
+ print
+ print " %s" % (rv['url'].replace("/go/", "/nb/"))
+ print
+
class YTPlotCmd(YTCommand):
args = ("width", "unit", "bn", "proj", "center",
"zlim", "axis", "field", "weight", "skip",
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -141,3 +141,8 @@
def __str__(self):
return "This parameter file doesn't recognize %s" % self.unit
+
+class YTHubRegisterError(YTException):
+ def __str__(self):
+ return "You must create an API key before uploading. See " + \
+ "https://data.yt-project.org/getting_started.html"
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/flagging_methods.py
--- /dev/null
+++ b/yt/utilities/flagging_methods.py
@@ -0,0 +1,51 @@
+"""
+Utilities for flagging zones for refinement in a dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np # For modern purposes
+
+flagging_method_registry = {}
+
+def flag_cells(grid, methods):
+ flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+ for method in methods:
+ flagged |= method(grid)
+ return flagged
+
+class FlaggingMethod(object):
+ _skip_add = False
+ class __metaclass__(type):
+ def __init__(cls, name, b, d):
+ type.__init__(cls, name, b, d)
+ if hasattr(cls, "_type_name") and not cls._skip_add:
+ flagging_method_registry[cls._type_name] = cls
+
+class OverDensity(FlaggingMethod):
+ _type_name = "overdensity"
+ def __init__(self, over_density):
+ self.over_density = over_density
+
+ def __call__(self, pf, grid):
+ rho = grid["Density"] / (pf.refine_by**grid.Level)
+ return (rho > self.over_density)
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -53,8 +53,8 @@
@cython.boundscheck(False)
@cython.wraparound(False)
-def read_and_seek(char *filename, int offset1, int offset2,
- np.ndarray buffer, int bytes):
+def read_and_seek(char *filename, np.int64_t offset1,
+ np.int64_t offset2, np.ndarray buffer, int bytes):
cdef FILE *f = fopen(filename, "rb")
cdef void *buf = <void *> buffer.data
cdef char line[1024]
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -590,7 +590,7 @@
cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
for i in range(pos_x.shape[0]):
kdtree_utils.kd_insert3(self.tree,
- pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+ pos_x[i], pos_y[i], pos_z[i], <void *> (pointer + i*3))
def __dealloc__(self):
kdtree_utils.kd_free(self.tree)
@@ -616,7 +616,7 @@
cdef np.float64_t slopes[6], dp[3], ds[3]
cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
- cdef int nstars
+ cdef int nstars, dti, i, j
cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
for i in range(3):
dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
@@ -648,6 +648,7 @@
dvs[i] = temp
for dti in range(vri.n_samples):
# Now we add the contribution from stars
+ kdtree_utils.kd_res_rewind(ballq)
for i in range(nstars):
kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
@@ -655,20 +656,22 @@
gexp = (px - pos[0])*(px - pos[0]) \
+ (py - pos[1])*(py - pos[1]) \
+ (pz - pos[2])*(pz - pos[2])
- gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
- for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+ gaussian = vri.star_coeff * exp(-gexp/vri.star_sigma_num)
+ for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
for i in range(3):
pos[i] += local_dds[i]
FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
vri.field_table_ids, vri.grey_opacity)
for i in range(vc.n_fields):
dvs[i] += slopes[i]
+ kdtree_utils.kd_res_free(ballq)
cdef class VolumeRenderSampler(ImageSampler):
cdef VolumeRenderAccumulator *vra
cdef public object tf_obj
cdef public object my_field_tables
cdef kdtree_utils.kdtree **trees
+ cdef object tree_containers
def __cinit__(self,
np.ndarray vp_pos,
np.ndarray vp_dir,
@@ -709,6 +712,7 @@
self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
self.supp_data = <void *> self.vra
cdef star_kdtree_container skdc
+ self.tree_containers = star_list
if star_list is None:
self.trees = NULL
else:
@@ -719,10 +723,15 @@
self.trees[i] = skdc.tree
cdef void setup(self, PartitionedGrid pg):
+ cdef star_kdtree_container star_tree
if self.trees == NULL:
self.sampler = volume_render_sampler
else:
+ star_tree = self.tree_containers[pg.parent_grid_id]
self.vra.star_list = self.trees[pg.parent_grid_id]
+ self.vra.star_sigma_num = 2.0*star_tree.sigma**2.0
+ self.vra.star_er = 2.326 * star_tree.sigma
+ self.vra.star_coeff = star_tree.coeff
self.sampler = volume_render_stars_sampler
def __dealloc__(self):
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -30,6 +30,7 @@
from tempfile import TemporaryFile
from yt.config import ytcfg
from yt.funcs import *
+from yt.utilities.exceptions import *
from .poster.streaminghttp import register_openers
from .poster.encode import multipart_encode
@@ -93,6 +94,7 @@
def upload(self):
api_key = ytcfg.get("yt","hub_api_key")
url = ytcfg.get("yt","hub_url")
+ if api_key == '': raise YTHubRegisterError
metadata, (final_name, chunks) = self._generate_post()
if hasattr(self, "_pf_mrep"):
self._pf_mrep.upload()
@@ -216,3 +218,22 @@
metadata = self._attrs
chunks = []
return (metadata, ("chunks", []))
+
+class MinimalNotebook(MinimalRepresentation):
+ type = "notebook"
+ _attr_list = ("title",)
+
+ def __init__(self, filename, title = None):
+ # First we read in the data
+ if not os.path.isfile(filename):
+ raise IOError(filename)
+ self.data = open(filename).read()
+ if title is None:
+ title = json.loads(self.data)['metadata']['name']
+ self.title = title
+ self.data = np.fromstring(self.data, dtype='c')
+
+ def _generate_post(self):
+ metadata = self._attrs
+ chunks = [ ("notebook", self.data) ]
+ return (metadata, ("chunks", chunks))
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -67,6 +67,8 @@
t = np.cross(normal_vector, vecs).sum(axis=1)
ax = t.argmax()
east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+ # self.north_vector must remain None otherwise rotations about a fixed axis will break.
+ # The north_vector calculated here will still be included in self.unit_vectors.
north_vector = np.cross(normal_vector, east_vector).ravel()
else:
if self.steady_north:
@@ -82,7 +84,7 @@
r"""Change the view direction based on any of the orientation parameters.
This will recalculate all the necessary vectors and vector planes related
- to a an orientable object.
+ to an orientable object.
Parameters
----------
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/tests/test_flagging_methods.py
--- /dev/null
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -0,0 +1,12 @@
+from yt.testing import *
+from yt.utilities.flagging_methods import flagging_method_registry
+
+def setup():
+ global pf
+ pf = fake_random_pf(64)
+ pf.h
+
+def test_over_density():
+ od_flag = flagging_method_registry["overdensity"](0.75)
+ criterion = (pf.h.grids[0]["Density"] > 0.75)
+ assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/utilities/tests/test_interpolators.py
--- /dev/null
+++ b/yt/utilities/tests/test_interpolators.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+import yt.utilities.linear_interpolators as lin
+
+def setup():
+ pass
+
+def test_linear_interpolator_1d():
+ random_data = np.random.random(64)
+ fv = {'x': np.mgrid[0.0:1.0:64j]}
+ ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
+ assert_array_equal(ufi(fv), random_data)
+
+def test_linear_interpolator_2d():
+ random_data = np.random.random((64, 64))
+ fv = dict((ax, v) for ax, v in zip("xyz",
+ np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
+ bfi = lin.BilinearFieldInterpolator(random_data,
+ (0.0, 1.0, 0.0, 1.0), "xy", True)
+ assert_array_equal(bfi(fv), random_data)
+
+def test_linear_interpolator_3d():
+ random_data = np.random.random((64, 64, 64))
+ fv = dict((ax, v) for ax, v in zip("xyz",
+ np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
+ tfi = lin.TrilinearFieldInterpolator(random_data,
+ (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
+ assert_array_equal(tfi(fv), random_data)
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -66,6 +66,7 @@
from plot_window import \
SlicePlot, \
OffAxisSlicePlot, \
- ProjectionPlot
+ ProjectionPlot, \
+ OffAxisProjectionPlot
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -28,6 +28,7 @@
x_dict, \
y_dict, \
axis_names
+from .volume_rendering.api import off_axis_projection
import _MPL
import numpy as np
import weakref
@@ -384,3 +385,28 @@
self.bounds).transpose()
self[item] = buff
return buff
+
+
+class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
+ def __init__(self, data_source, bounds, buff_size, antialias = True,
+ periodic = False):
+ self.data = {}
+ FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+ def __getitem__(self, item):
+ if item in self.data: return self.data[item]
+ mylog.info("Making a fixed resolutuion buffer of (%s) %d by %d" % \
+ (item, self.buff_size[0], self.buff_size[1]))
+ ds = self.data_source
+ width = (self.bounds[1] - self.bounds[0],
+ self.bounds[3] - self.bounds[2],
+ self.bounds[5] - self.bounds[4])
+ buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+ width, ds.resolution, item,
+ weight=ds.weight_field, volume=ds.volume,
+ no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+ north_vector=ds.north_vector)
+ self[item] = buff.swapaxes(0,1)
+ return buff
+
+
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -152,8 +152,7 @@
alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
if transpose:
- for channel in range(bitmap_array.shape[2]):
- bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
+ bitmap_array = bitmap_array.swapaxes(0,1)
if filename is not None:
au.write_png(bitmap_array.copy(), filename)
else:
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -146,7 +146,9 @@
def __call__(self, plot):
# Instantiation of these is cheap
if plot._type_name == "CuttingPlane":
- print "WARNING: Magnetic field on Cutting Plane Not implemented."
+ qcb = CuttingQuiverCallback("CuttingPlaneBx",
+ "CuttingPlaneBy",
+ self.factor)
else:
xv = "B%s" % (x_names[plot.data.axis])
yv = "B%s" % (y_names[plot.data.axis])
@@ -432,6 +434,9 @@
iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
+ # scale into data units
+ lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
+ lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
for i in range(self.data_size[0]):
for j in range(self.data_size[1]):
plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
@@ -650,8 +655,8 @@
plot.data[self.field_y],
int(nx), int(ny),
(x0, x1, y0, y1),).transpose()
- X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
- Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+ X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+ np.linspace(yy0,yy1,ny,endpoint=True))
plot._axes.quiver(X,Y, pixX, pixY)
plot._axes.set_xlim(xx0,xx1)
plot._axes.set_ylim(yy0,yy1)
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,7 +27,10 @@
import base64
import matplotlib.figure
from matplotlib.mathtext import MathTextParser
-from matplotlib.pyparsing import ParseFatalException
+try:
+ from matplotlib.pyparsing import ParseFatalException
+except ImportError:
+ from pyparsing import ParseFatalException
import cStringIO
import types
import __builtin__
@@ -40,7 +43,8 @@
write_image, apply_colormap
from .fixed_resolution import \
FixedResolutionBuffer, \
- ObliqueFixedResolutionBuffer
+ ObliqueFixedResolutionBuffer, \
+ OffAxisProjectionFixedResolutionBuffer
from .plot_modifications import get_smallest_appropriate_unit, \
callback_registry
from .tick_locators import LogLocator, LinearLocator
@@ -103,7 +107,10 @@
self.pf = frb.pf
self.xlim = viewer.xlim
self.ylim = viewer.ylim
- self._type_name = ''
+ if 'Cutting' in self.data.__class__.__name__:
+ self._type_name = "CuttingPlane"
+ else:
+ self._type_name = ''
class FieldTransform(object):
def __init__(self, name, func, locator):
@@ -154,7 +161,7 @@
center[y_dict[axis]]+width[1]/2]
return (bounds,center)
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1'):
+def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
if width == None:
width = (pf.domain_width.min(),
pf.domain_width.min())
@@ -165,6 +172,13 @@
width = (width, width)
Wx, Wy = width
width = np.array((Wx/pf[unit], Wy/pf[unit]))
+ if depth != None:
+ if iterable(depth) and isinstance(depth[1],str):
+ d,unit = depth
+ depth = d/pf[unit]
+ elif iterable(depth):
+ raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
+ width = np.append(width,depth)
if isinstance(center,str):
if center.lower() == 'm' or center.lower() == 'max':
v, center = pf.h.find_max("Density")
@@ -173,16 +187,19 @@
else:
raise RuntimeError('center keyword \"%s\" not recognized'%center)
- # Transforming to the cutting plane coordinate system
- center = np.array(center)
- center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
- (normal,perp1,perp2) = ortho_find(normal)
- mat = np.transpose(np.column_stack((perp1,perp2,normal)))
- center = np.dot(mat,center)
- width = width/pf.domain_width.min()
+ if width.shape == (2,):
+ # Transforming to the cutting plane coordinate system
+ center = np.array(center)
+ center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
+ (normal,perp1,perp2) = ortho_find(normal)
+ mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+ center = np.dot(mat,center)
+ width = width
+
+ bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+ else:
+ bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
- bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
-
return (bounds,center)
class PlotWindow(object):
@@ -244,20 +261,14 @@
old_fields = None
if self._frb is not None:
old_fields = self._frb.keys()
- try:
+ if hasattr(self,'zlim'):
+ bounds = self.xlim+self.ylim+self.zlim
+ else:
bounds = self.xlim+self.ylim
- if self.oblique == False:
- self._frb = FixedResolutionBuffer(self.data_source,
- bounds, self.buff_size,
- self.antialias,
- periodic=self._periodic)
- else:
- self._frb = ObliqueFixedResolutionBuffer(self.data_source,
- bounds, self.buff_size,
- self.antialias,
- periodic=self._periodic)
- except:
- raise RuntimeError("Failed to repixelize.")
+ self._frb = self._frb_generator(self.data_source,
+ bounds, self.buff_size,
+ self.antialias,
+ periodic=self._periodic)
if old_fields is None:
self._frb._get_data_source_fields()
else:
@@ -298,6 +309,7 @@
nWx, nWy = Wx/factor, Wy/factor
self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
+
@invalidate_data
def pan(self, deltas):
@@ -344,12 +356,16 @@
dy = bounds[3] - bounds[2]
self.xlim = (self.center[0] - dx/2., self.center[0] + dx/2.)
self.ylim = (self.center[1] - dy/2., self.center[1] + dy/2.)
- mylog.info("xlim = %f %f" %self.xlim)
- mylog.info("ylim = %f %f" %self.ylim)
else:
- self.xlim = bounds[0:2]
- self.ylim = bounds[2:]
-
+ self.xlim = tuple(bounds[0:2])
+ self.ylim = tuple(bounds[2:4])
+ if len(bounds) == 6:
+ self.zlim = tuple(bounds[4:6])
+ mylog.info("xlim = %f %f" %self.xlim)
+ mylog.info("ylim = %f %f" %self.ylim)
+ if hasattr(self,'zlim'):
+ mylog.info("zlim = %f %f" %self.zlim)
+
@invalidate_data
def set_width(self, width, unit = '1'):
"""set the width of the plot window
@@ -395,14 +411,20 @@
width = (Wx,Wy)
width = [w / self.pf[unit] for w in width]
- centerx = (self.xlim[1] + self.xlim[0])/2
- centery = (self.ylim[1] + self.ylim[0])/2
+ centerx = (self.xlim[1] + self.xlim[0])/2.
+ centery = (self.ylim[1] + self.ylim[0])/2.
self.xlim = (centerx - width[0]/2.,
centerx + width[0]/2.)
self.ylim = (centery - width[1]/2.,
centery + width[1]/2.)
+ if hasattr(self,'zlim'):
+ centerz = (self.zlim[1] + self.zlim[0])/2.
+ mw = max(width)
+ self.zlim = (centerz - mw/2.,
+ centerz + mw/2.)
+
@invalidate_data
def set_center(self, new_center, unit = '1'):
"""Sets a new center for the plot window
@@ -809,7 +831,7 @@
raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
self.plots[field].image.set_cmap(cmap)
- def save(self,name=None,mpl_kwargs={}):
+ def save(self, name=None, mpl_kwargs=None):
"""saves the plot to disk.
Parameters
@@ -827,15 +849,12 @@
name = str(self.pf)
elif name.endswith('.png'):
return v.save(name)
+ if mpl_kwargs is None: mpl_kwargs = {}
axis = axis_names[self.data_source.axis]
weight = None
- if 'Slice' in self.data_source.__class__.__name__:
- type = 'Slice'
- if 'Proj' in self.data_source.__class__.__name__:
- type = 'Projection'
+ type = self._plot_type
+ if type in ['Projection','OffAxisProjection']:
weight = self.data_source.weight_field
- if 'Cutting' in self.data_source.__class__.__name__:
- type = 'OffAxisSlice'
names = []
for k, v in self.plots.iteritems():
if axis:
@@ -849,11 +868,15 @@
return names
def _send_zmq(self):
- from IPython.zmq.pylab.backend_inline import \
- send_figure
+ try:
+ # pre-IPython v0.14
+ from IPython.zmq.pylab.backend_inline import send_figure as display
+ except ImportError:
+ # IPython v0.14+
+ from IPython.core.display import display
for k, v in sorted(self.plots.iteritems()):
canvas = FigureCanvasAgg(v.figure)
- send_figure(v.figure)
+ display(v.figure)
def show(self):
r"""This will send any existing plots to the IPython notebook.
@@ -879,6 +902,9 @@
raise YTNotInsideNotebook
class SlicePlot(PWViewerMPL):
+ _plot_type = 'Slice'
+ _frb_generator = FixedResolutionBuffer
+
def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
origin='center-window'):
r"""Creates a slice plot from a parameter file
@@ -954,6 +980,9 @@
self.set_axes_unit(axes_unit)
class ProjectionPlot(PWViewerMPL):
+ _plot_type = 'Projection'
+ _frb_generator = FixedResolutionBuffer
+
def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
weight_field=None, max_level=None, origin='center-window'):
r"""Creates a projection plot from a parameter file
@@ -1033,6 +1062,9 @@
self.set_axes_unit(axes_unit)
class OffAxisSlicePlot(PWViewerMPL):
+ _plot_type = 'OffAxisSlice'
+ _frb_generator = ObliqueFixedResolutionBuffer
+
def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'),
axes_unit=None, north_vector=None):
r"""Creates an off axis slice plot from a parameter file
@@ -1080,6 +1112,95 @@
PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
self.set_axes_unit(axes_unit)
+class OffAxisProjectionDummyDataSource(object):
+ _type_name = 'proj'
+ proj_style = 'integrate'
+ _key_fields = []
+ def __init__(self, center, pf, normal_vector, width, fields,
+ interpolated, resolution = (800,800), weight=None,
+ volume=None, no_ghost=False, le=None, re=None,
+ north_vector=None):
+ self.center = center
+ self.pf = pf
+ self.axis = 4 # always true for oblique data objects
+ self.normal_vector = normal_vector
+ self.width = width
+ self.fields = fields
+ self.interpolated = interpolated
+ self.resolution = resolution
+ self.weight_field = weight
+ self.volume = volume
+ self.no_ghost = no_ghost
+ self.le = le
+ self.re = re
+ self.north_vector = north_vector
+
+class OffAxisProjectionPlot(PWViewerMPL):
+ _plot_type = 'OffAxisProjection'
+ _frb_generator = OffAxisProjectionFixedResolutionBuffer
+
+ def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'),
+ depth=(1,'unitary'), axes_unit=None, weight_field=None,
+ max_level=None, north_vector=None, volume=None, no_ghost=False,
+ le=None, re=None, interpolated=False):
+ r"""Creates an off axis projection plot from a parameter file
+
+ Given a pf object, a normal vector to project along, and
+        a field name string, this will return a PWViewerMPL object
+ containing the plot.
+
+ The plot can be updated using one of the many helper functions
+ defined in PlotWindow.
+
+ Parameters
+ ----------
+ pf : :class:`yt.data_objects.api.StaticOutput`
+ This is the parameter file object corresponding to the
+ simulation output to be plotted.
+ normal : a sequence of floats
+ The vector normal to the slicing plane.
+ fields : string
+ The name of the field(s) to be plotted.
+ center : A two or three-element vector of sequence floats, 'c', or 'center'
+             The coordinate of the center of the image. If left blank,
+ the image centers on the location of the maximum density
+ cell. If set to 'c' or 'center', the plot is centered on
+ the middle of the domain.
+ width : A tuple or a float
+ A tuple containing the width of image and the string key of
+ the unit: (width, 'unit'). If set to a float, code units
+ are assumed
+ depth : A tuple or a float
+             A tuple containing the depth to project through and the string
+ key of the unit: (width, 'unit'). If set to a float, code units
+ are assumed
+ weight_field : string
+ The name of the weighting field. Set to None for no weight.
+ max_level: int
+ The maximum level to project to.
+ axes_unit : A string
+ The name of the unit for the tick labels on the x and y axes.
+ Defaults to None, which automatically picks an appropriate unit.
+ If axes_unit is '1', 'u', or 'unitary', it will not display the
+ units, and only show the axes name.
+        north_vector : a sequence of floats
+ A vector defining the 'up' direction in the plot. This
+ option sets the orientation of the slicing plane. If not
+ set, an arbitrary grid-aligned north-vector is chosen.
+
+ """
+ (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
+ # Hard-coding the resolution for now
+ fields = ensure_list(fields)[:]
+ width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
+ OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
+ weight=weight_field, volume=volume, no_ghost=no_ghost,
+ le=le, re=re, north_vector=north_vector)
+ # Hard-coding the origin keyword since the other two options
+ # aren't well-defined for off-axis data objects
+ PWViewerMPL.__init__(self,OffAxisProj,bounds,origin='center-window',periodic=False,oblique=True)
+ self.set_axes_unit(axes_unit)
+
_metadata_template = """
%(pf)s<br><br>
diff -r 2eee5fc43d543cc72c41c211a0c49ec1847eec14 -r 066a7b53b64de31705574ca71dd06fd130327689 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -195,7 +195,7 @@
if not iterable(width):
width = (width, width, width) # left/right, top/bottom, front/back
self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
- self.rotation_vector = self.orienter.north_vector
+ self.rotation_vector = self.orienter.unit_vectors[1]
self._setup_box_properties(width, center, self.orienter.unit_vectors)
if fields is None: fields = ["Density"]
self.fields = fields
@@ -282,7 +282,7 @@
if center is not None:
self.center = center
if north_vector is None:
- north_vector = self.orienter.north_vector
+ north_vector = self.orienter.unit_vectors[1]
if normal_vector is None:
normal_vector = self.orienter.normal_vector
self.orienter.switch_orientation(normal_vector = normal_vector,
@@ -301,7 +301,11 @@
np.array(self.width), self.transfer_function, self.sub_samples)
return args
+ star_trees = None
def get_sampler(self, args):
+ kwargs = {}
+ if self.star_trees is not None:
+ kwargs = {'star_list': self.star_trees}
if self.use_light:
if self.light_dir is None:
self.set_default_light_dir()
@@ -312,9 +316,10 @@
if self.light_rgba is None:
self.set_default_light_rgba()
sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
- light_rgba=self.light_rgba)
+ light_rgba=self.light_rgba, **kwargs)
else:
- sampler = self._sampler_object(*args)
+ sampler = self._sampler_object(*args, **kwargs)
+ print sampler, kwargs
return sampler
def finalize_image(self, image):
@@ -587,7 +592,7 @@
"""
rot_vector = self.orienter.normal_vector
R = get_rotation_matrix(theta, rot_vector)
- north_vector = self.orienter.north_vector
+ north_vector = self.orienter.unit_vectors[1]
self.switch_view(north_vector=np.dot(R, north_vector))
def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
@@ -720,6 +725,9 @@
], dtype='float64')
class HEALpixCamera(Camera):
+
+ _sampler_object = None
+
def __init__(self, center, radius, nside,
transfer_function = None, fields = None,
sub_samples = 5, log_fields = None, volume = None,
@@ -733,6 +741,12 @@
if transfer_function is None:
transfer_function = ProjectionTransferFunction()
self.transfer_function = transfer_function
+
+ if isinstance(self.transfer_function, ProjectionTransferFunction):
+ self._sampler_object = ProjectionSampler
+ else:
+ self._sampler_object = VolumeRenderSampler
+
if fields is None: fields = ["Density"]
self.fields = fields
self.sub_samples = sub_samples
@@ -1667,7 +1681,8 @@
def off_axis_projection(pf, center, normal_vector, width, resolution,
field, weight = None,
- volume = None, no_ghost = False, interpolated = False):
+ volume = None, no_ghost = False, interpolated = False,
+ north_vector = None):
r"""Project through a parameter file, off-axis, and return the image plane.
This function will accept the necessary items to integrate through a volume
@@ -1726,8 +1741,9 @@
"""
projcam = ProjectionCamera(center, normal_vector, width, resolution,
- field, weight=weight, pf=pf, volume=volume,
- no_ghost=no_ghost, interpolated=interpolated)
+ field, weight=weight, pf=pf, volume=volume,
+ no_ghost=no_ghost, interpolated=interpolated,
+ north_vector=north_vector)
image = projcam.snapshot()
if weight is not None:
pf.field_info.pop("temp_weightfield")
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list