[yt-svn] commit/yt: 2 new changesets
Bitbucket
commits-noreply at bitbucket.org
Tue May 1 11:21:18 PDT 2012
2 new commits in yt:
https://bitbucket.org/yt_analysis/yt/changeset/4a2948f427ae/
changeset: 4a2948f427ae
branch: yt
user: MatthewTurk
date: 2012-05-01 20:20:49
summary: Removing some more unused code.
affected #: 3 files
diff -r d83cfc4c6dad91076ae4eb42f1aab0cad978af38 -r 4a2948f427ae89c473145dd9c8cb73b0899914b5 yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -43,14 +43,3 @@
for atype in desc['formats']:
blanks.append(na.zeros(elements, dtype=atype))
return rec.fromarrays(blanks, **desc)
-
-class YTArrayHandler(object):
- def __getattr__(self, name):
- try:
- return object.__getattribute__(self, name)
- except AttributeError:
- return getattr(na, name)
- raise
-
-#na = YTArrayHandler()
-#print na.zeros
diff -r d83cfc4c6dad91076ae4eb42f1aab0cad978af38 -r 4a2948f427ae89c473145dd9c8cb73b0899914b5 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -35,35 +35,6 @@
output_type_registry, \
EnzoRunDatabase
-def all_pfs(basedir='.', skip=None, max_depth=1, name_spec="*.hierarchy", **kwargs):
- """
- This function searches a directory and its sub-directories, up to a
- depth of *max_depth*, for parameter files. It looks for the
- *name_spec* and then instantiates an EnzoStaticOutput from
- each. You can skip every *skip* parameter files, if *skip* is not
- None; otherwise it will return all files. All subsequent *kwargs*
- are passed on to the EnzoStaticOutput constructor.
- """
- list_of_names = []
- basedir = os.path.expanduser(basedir)
- for i in range(max_depth):
- bb = list('*' * i) + [name_spec]
- list_of_names += glob.glob(os.path.join(basedir,*bb))
- list_of_names.sort(key=lambda b: os.path.basename(b))
- for fn in list_of_names[::skip]:
- yield load(fn[:-10], **kwargs)
-
-def max_spheres(width, unit, **kwargs):
- """
- This calls :func:`~yt.convenience.all_pfs` and then for each parameter file
- creates a :class:`~yt.data_objects.api.AMRSphereBase` for each one,
- centered on the point of highest density, with radius *width* in units of
- *unit*.
- """
- for pf in all_pfs(**kwargs):
- v, c = pf.h.find_max("Density")
- yield pf.h.sphere(c, width/pf[unit])
-
def load(*args ,**kwargs):
"""
This function attempts to determine the base data type of a filename or
@@ -140,77 +111,3 @@
f.close()
return proj
-def _chunk(arrlike, chunksize = 800000):
- total_size = arrlike.shape[0]
- pbar = get_pbar("Transferring %s " % (arrlike.name), total_size)
- start = 0; end = 0
- bits = []
- while start < total_size:
- bits.append(arrlike[start:start+chunksize])
- pbar.update(start)
- start += chunksize
- pbar.finish()
- return na.concatenate(bits)
-
-def dapload(p, axis, weight_field = None):
- r"""Load a projection dataset from a DAP server.
-
- If you have projections stored externally on a DAP server, this function
- can load them (transferring in chunks to avoid overloading) locally and
- display them.
-
- Parameters
- ----------
- p : string
- URL for the dataset on the DAP server
- axis : int
- The axis of projection to load (0, 1, 2)
- weight_field : string
- The weight_field used in the projection
-
- Returns
- -------
- projmock : ProjMock
- This is a mockup of a projection that mostly fills the API. It can be
- used with `yt.visualization.image_panner.api.VariableMeshPanner`
- objects.
-
- See Also
- --------
- http://www.opendap.org/ and http://pydap.org/2.x/ . (Note that HDF5 is not
- supported on PyDAP 3.x servers.)
-
- Examples
- --------
-
- >>> p = "http://datasets-r-us.org/output_0013.h5"
- >>> proj = dapload(p, 0, "Density")
- >>> vmp = VariableMeshPanner(proj, (512, 512), "Density", ImageSaver(0))
- >>> vmp.zoom(1.0)
- """
- class PFMock(dict):
- domain_left_edge = na.zeros(3, dtype='float64')
- domain_right_edge = na.ones(3, dtype='float64')
- pf = PFMock()
- class ProjMock(dict):
- pass
- import dap.client
- f = dap.client.open(p)
- b = f["Projections"]["%s" % (axis)]
- wf = "weight_field_%s" % weight_field
- if wf not in b: raise KeyError(wf)
- fields = []
- for k in b:
- if k.name.startswith("weight_field"): continue
- if k.name.endswith("_%s" % weight_field):
- fields.append(k.name)
- proj = ProjMock()
- for f in ["px","py","pdx","pdy"]:
- proj[f] = _chunk(b[f])
- for f in fields:
- new_name = f[:-(len(str(weight_field)) + 1)]
- proj[new_name] = _chunk(b[f])
- proj.axis = axis
- proj.pf = pf
- return proj
-
diff -r d83cfc4c6dad91076ae4eb42f1aab0cad978af38 -r 4a2948f427ae89c473145dd9c8cb73b0899914b5 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -128,7 +128,7 @@
for name, cls in callback_registry.items():
exec("%s = cls" % name)
-from yt.convenience import all_pfs, max_spheres, load, projload
+from yt.convenience import load, projload
# Import some helpful math utilities
from yt.utilities.math_utils import \
https://bitbucket.org/yt_analysis/yt/changeset/b48beb25efe0/
changeset: b48beb25efe0
branch: yt
user: MatthewTurk
date: 2012-05-01 20:21:09
summary: Merge
affected #: 5 files
diff -r 4a2948f427ae89c473145dd9c8cb73b0899914b5 -r b48beb25efe03efaf578d6b968798e87548afef9 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -57,7 +57,7 @@
candidates = []
args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
else arg for arg in args]
- valid_file = [os.path.isfile(arg) if isinstance(arg, types.StringTypes)
+ valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes)
else False for arg in args]
if not any(valid_file):
mylog.error("None of the arguments provided to load() is a valid file")
diff -r 4a2948f427ae89c473145dd9c8cb73b0899914b5 -r b48beb25efe03efaf578d6b968798e87548afef9 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -121,8 +121,37 @@
self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
self._levels = self._fhandle.keys()[1:]
AMRHierarchy.__init__(self,pf,data_style)
+ self._read_particles()
self._fhandle.close()
+ def _read_particles(self):
+ self.particle_filename = self.hierarchy_filename[:-4] + 'sink'
+ if not os.path.exists(self.particle_filename): return
+ with open(self.particle_filename, 'r') as f:
+ lines = f.readlines()
+ self.num_stars = int(lines[0].strip().split(' ')[0])
+ for line in lines[1:]:
+ particle_position_x = float(line.split(' ')[1])
+ particle_position_y = float(line.split(' ')[2])
+ particle_position_z = float(line.split(' ')[3])
+ coord = [particle_position_x, particle_position_y, particle_position_z]
+ # for each particle, determine which grids contain it
+ # copied from object_finding_mixin.py
+ mask=na.ones(self.num_grids)
+ for i in xrange(len(coord)):
+ na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+ na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+ ind = na.where(mask == 1)
+ selected_grids = self.grids[ind]
+ # in orion, particles always live on the finest level.
+ # so, we want to assign the particle to the finest of
+ # the grids we just found
+ if len(selected_grids) != 0:
+ grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+ ind = na.where(self.grids == grid)[0][0]
+ self.grid_particle_count[ind] += 1
+ self.grids[ind].NumberOfParticles += 1
+
def _initialize_data_storage(self):
pass
diff -r 4a2948f427ae89c473145dd9c8cb73b0899914b5 -r b48beb25efe03efaf578d6b968798e87548afef9 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,6 +33,7 @@
ValidateSpatial, \
ValidateGridType
import yt.data_objects.universal_fields
+import numpy as na
KnownChomboFields = FieldInfoContainer()
add_chombo_field = KnownChomboFields.add_field
@@ -76,12 +77,12 @@
units=r"",display_name=r"B_z")
KnownChomboFields["Z-magnfield"]._projected_units=r""
-add_chombo_field("energy-density", function=lambda a,b: None, take_log=True,
+add_chombo_field("energy-density", function=NullFunc, take_log=True,
validators = [ValidateDataField("energy-density")],
units=r"\rm{erg}/\rm{cm}^3")
KnownChomboFields["energy-density"]._projected_units =r""
-add_chombo_field("radiation-energy-density", function=lambda a,b: None, take_log=True,
+add_chombo_field("radiation-energy-density", function=NullFunc, take_log=True,
validators = [ValidateDataField("radiation-energy-density")],
units=r"\rm{erg}/\rm{cm}^3")
KnownChomboFields["radiation-energy-density"]._projected_units =r""
@@ -125,3 +126,36 @@
return data["Z-momentum"]/data["density"]
add_field("z-velocity",function=_zVelocity, take_log=False,
units=r'\rm{cm}/\rm{s}')
+
+def particle_func(p_field, dtype='float64'):
+ def _Particles(field, data):
+ io = data.hierarchy.io
+ if not data.NumberOfParticles > 0:
+ return na.array([], dtype=dtype)
+ else:
+ return io._read_particles(data, p_field).astype(dtype)
+
+ return _Particles
+
+_particle_field_list = ["mass",
+ "position_x",
+ "position_y",
+ "position_z",
+ "momentum_x",
+ "momentum_y",
+ "momentum_z",
+ "angmomen_x",
+ "angmomen_y",
+ "angmomen_z",
+ "mlast",
+ "mdeut",
+ "n",
+ "mdot",
+ "burnstate",
+ "id"]
+
+for pf in _particle_field_list:
+ pfunc = particle_func("particle_%s" % (pf))
+ add_field("particle_%s" % pf, function=pfunc,
+ validators = [ValidateSpatial(0)],
+ particle_type=True)
diff -r 4a2948f427ae89c473145dd9c8cb73b0899914b5 -r b48beb25efe03efaf578d6b968798e87548afef9 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,6 +25,7 @@
"""
import h5py
import re
+import numpy as na
from yt.utilities.io_handler import \
BaseIOHandler
@@ -70,3 +71,41 @@
sl[axis] = slice(coord, coord + 1)
return self._read_data_set(grid,field)[sl]
+ def _read_particles(self, grid, field):
+ """
+ parses the Orion Star Particle text files
+
+ """
+ index = {'particle_mass': 0,
+ 'particle_position_x': 1,
+ 'particle_position_y': 2,
+ 'particle_position_z': 3,
+ 'particle_momentum_x': 4,
+ 'particle_momentum_y': 5,
+ 'particle_momentum_z': 6,
+ 'particle_angmomen_x': 7,
+ 'particle_angmomen_y': 8,
+ 'particle_angmomen_z': 9,
+ 'particle_mlast': 10,
+ 'particle_mdeut': 11,
+ 'particle_n': 12,
+ 'particle_mdot': 13,
+ 'particle_burnstate': 14,
+ 'particle_id': 15}
+
+ def read(line, field):
+ return float(line.split(' ')[index[field]])
+
+ fn = grid.pf.fullplotdir[:-4] + "sink"
+ with open(fn, 'r') as f:
+ lines = f.readlines()
+ particles = []
+ for line in lines[1:]:
+ if grid.NumberOfParticles > 0:
+ coord = read(line, "particle_position_x"), \
+ read(line, "particle_position_y"), \
+ read(line, "particle_position_z")
+ if ( (grid.LeftEdge < coord).all() and
+ (coord <= grid.RightEdge).all() ):
+ particles.append(read(line, field))
+ return na.array(particles)
diff -r 4a2948f427ae89c473145dd9c8cb73b0899914b5 -r b48beb25efe03efaf578d6b968798e87548afef9 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -163,16 +163,16 @@
for i in xrange(len(coord)):
na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
- ind = na.where(mask == 1)
- selected_grids = self.grids[ind]
- # in orion, particles always live on the finest level.
- # so, we want to assign the particle to the finest of
- # the grids we just found
- if len(selected_grids) != 0:
- grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
- ind = na.where(self.grids == grid)[0][0]
- self.grid_particle_count[ind] += 1
- self.grids[ind].NumberOfParticles += 1
+ ind = na.where(mask == 1)
+ selected_grids = self.grids[ind]
+ # in orion, particles always live on the finest level.
+ # so, we want to assign the particle to the finest of
+ # the grids we just found
+ if len(selected_grids) != 0:
+ grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+ ind = na.where(self.grids == grid)[0][0]
+ self.grid_particle_count[ind] += 1
+ self.grids[ind].NumberOfParticles += 1
return True
def readGlobalHeader(self,filename,paranoid_read):
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this email, addressed to you as the recipient, because you have the
commit-notification service enabled.
More information about the yt-svn
mailing list