[yt-svn] commit/yt: 43 new changesets
commits-noreply at bitbucket.org
Tue Dec 3 05:11:50 PST 2013
43 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/cab93992e337/
Changeset: cab93992e337
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-19 19:56:38
Summary: First import of HTTPStream particle handler.
Affected #: 1 file
diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r cab93992e3375b3662ccec753462dbecc3cae6a1 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -28,6 +28,11 @@
from yt.geometry.oct_container import _ORDER_MAX
+try:
+ import requests
+except ImportError:
+ requests = None
+
CHUNKSIZE = 10000000
def _get_h5_handle(fn):
@@ -532,3 +537,75 @@
size = self._pdtypes[ptype].itemsize
pos += data_file.total_particles[ptype] * size
return field_offsets
+
+class IOHandlerHTTPStream(BaseIOHandler):
+ def __init__(self, pf):
+ if requests is None:
+ raise RuntimeError
+ self._url = pf.base_url
+ # This should eventually manage the IO and cache it
+
+ def _open_stream(self, data_file, field):
+ # This does not actually stream yet!
+ ftype, fname = field
+ s = "%s/%s/%s/%s" % (self._url,
+ data_file.http_url,
+ ftype, fname)
+ resp = requests.get(s)
+ if resp.status_code != 200:
+ raise RuntimeError
+ return resp.content
+
+ def _read_particle_coords(self, chunks, ptf):
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype in ptf:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ yield ptype, (c[:,0], c[:,1], c[:,2])
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype, field_list in sorted(ptf.items()):
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ mask = selector.select_points(
+ c[:,0], c[:,1], c[:,2])
+ del c
+ if mask is None: continue
+ for field in field_list:
+ s = self._open_stream(data_file, (ptype, field))
+ c = np.frombuffer(s, dtype="float64")
+ if field in ("Coordinates", "Velocities"):
+ c.shape = (c.shape[0]/3.0, 3)
+ data = c[mask, ...]
+ yield (ptype, field), data
+
+ def _initialize_index(self, data_file, regions):
+ ptypes = self.pf.json_header["particle_types"]
+ pcount = sum(self.pf.json_header["particle_count"][ptype]
+ for ptype in ptypes)
+ morton = np.empty(pcount, dtype='uint64')
+ ind = 0
+ for ptype in ptypes:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ regions.add_data_file(c, data_file.file_id)
+ morton[ind:ind+c.shape[0]] = compute_morton(
+ c[:,0], c[:,1], c[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ ind += c.shape[0]
+ return morton
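The handler above treats every remote field as one opaque binary blob: the server is expected to return the raw little-endian float64 bytes of the array, which the client reinterprets with np.frombuffer and reshapes to (N, 3) for vector fields. A minimal standalone sketch of that round trip, assuming a hypothetical server following the <base_url>/<path>/<ftype>/<fname> layout (later changesets in this series settle the path component on data_file.file_id; the function name and URL below are illustrative, not part of the commit):

import numpy as np
import requests

def fetch_vector_field(base_url, file_id, ftype, fname):
    # Compose the endpoint the same way _open_stream does.
    url = "%s/%s/%s/%s" % (base_url, file_id, ftype, fname)
    resp = requests.get(url)
    if resp.status_code != 200:
        raise RuntimeError("HTTP %s for %s" % (resp.status_code, url))
    # Reinterpret the payload as float64 and reshape to one row per
    # particle; integer division avoids the float shape the committed
    # code passes (numpy coerces it back to an int).
    c = np.frombuffer(resp.content, dtype="float64")
    return c.reshape((c.shape[0] // 3, 3))

# coords = fetch_vector_field("http://localhost:8080/ds", 0,
#                             "PartType0", "Coordinates")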
https://bitbucket.org/yt_analysis/yt/commits/ec1ca9a24530/
Changeset: ec1ca9a24530
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-19 20:12:10
Summary: First attempt at HTTP Stream Particle Static Output
Affected #: 1 file
diff -r cab93992e3375b3662ccec753462dbecc3cae6a1 -r ec1ca9a245305f602500250b9f63c01090ed0da9 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -50,6 +50,11 @@
particle_deposition_functions, \
standard_particle_fields
+try:
+ import requests
+ import json
+except ImportError:
+ requests = None
class ParticleFile(object):
def __init__(self, pf, io, filename, file_id):
@@ -550,3 +555,75 @@
def _is_valid(self, *args, **kwargs):
# We do not allow load() of these files.
return False
+
+class HTTPParticleFile(ParticleFile):
+ pass
+
+class HTTPStreamStaticOutput(ParticleStaticOutput):
+ _hierarchy_class = ParticleGeometryHandler
+ _file_class = HTTPParticleFile
+ _fieldinfo_fallback = GadgetFieldInfo
+ _fieldinfo_known = KnownGadgetFields
+ _particle_mass_name = "Mass"
+ _particle_coordinates_name = "Coordinates"
+ _particle_velocity_name = "Velocities"
+
+ def __init__(self, base_url,
+ data_style = "http_particle_stream",
+ n_ref = 64, over_refine_factor=1):
+ if requests is None:
+ raise RuntimeError
+ self.base_url = base_url
+ self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
+ super(HTTPStreamStaticOutput, self).__init__("", data_style)
+
+ def __repr__(self):
+ return self.base_url
+
+ def _parse_parameter_file(self):
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.parameters["HydroMethod"] = "sph"
+
+ # Here's where we're going to grab the JSON index file
+ hreq = requests.get(self.base_url + "/yt_index.json")
+ if hreq.status != 200:
+ raise RuntimeError
+ header = json.loads(hreq.content)
+ self.parameters = header
+
+ # Now we get what we need
+ self.domain_left_edge = np.array(header['domain_left_edge'], "float64")
+ self.domain_right_edge = np.array(header['domain_right_edge'], "float64")
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
+ self.periodicity = (True, True, True)
+
+ self.unique_identifier = header.get("unique_identifier", time.time())
+ self.cosmological_simulation = int(header['cosmological_simulation'])
+ for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
+ 'hubble_constant'):
+ setattr(self, attr, float(header[attr]))
+
+ self.file_count = header['num_files']
+
+ def _set_units(self):
+ length_unit = float(self.parameters['units']['length'])
+ time_unit = float(self.parameters['units']['time'])
+ mass_unit = float(self.parameters['units']['mass'])
+ velocity_unit = length_unit / time_unit
+ self.conversion_factors["velocity"] = velocity_unit
+ self.conversion_factors["mass"] = mass_unit
+ self.conversion_factors["density"] = density_unit
+ self._unit_base['cm'] = length_unit
+ self._unit_base['s'] = time_unit
+ for u in sec_conversion:
+ self.time_units[u] = time_unit * sec_conversion[u]
+ super(HTTPStreamStaticOutput, self)._set_units()
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ if args[0].startswith("http://"):
+ return True
+ return False
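Two details of this first attempt are worth flagging, both fixed in changeset 580c10dc below: _parse_parameter_file checks hreq.status where requests exposes status_code, and _set_units references density_unit without ever defining it. For reference, the three base units carried in the JSON header determine the derived factors like so (numeric values hypothetical):

length_unit = 3.0857e24     # cm per code length, hypothetical
time_unit = 3.1557e16       # s per code time, hypothetical
mass_unit = 1.989e43        # g per code mass, hypothetical
velocity_unit = length_unit / time_unit
density_unit = mass_unit / length_unit ** 3   # the line this commit omits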
https://bitbucket.org/yt_analysis/yt/commits/51f5d45de403/
Changeset: 51f5d45de403
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-19 20:54:41
Summary: I am disabling this presently, as Nathan's fix in mainline is better.
Affected #: 1 file
diff -r ec1ca9a245305f602500250b9f63c01090ed0da9 -r 51f5d45de403cac2366ab75bfb98574ea2a86e1f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
- if not os.path.exists(apath): raise IOError(filename)
+ #if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
if obj._skip_cache is False:
https://bitbucket.org/yt_analysis/yt/commits/5a128868b387/
Changeset: 5a128868b387
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-19 21:10:43
Summary: Enabling "http" to skip isfile checks.
Affected #: 1 file
diff -r 51f5d45de403cac2366ab75bfb98574ea2a86e1f -r 5a128868b387043d9147b927a0a5364f717a0407 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
if isinstance(arg, types.StringTypes):
if os.path.exists(arg):
valid_file.append(True)
+ elif arg.startswith("http"):
+ valid_file.append(True)
else:
if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
valid_file.append(True)
https://bitbucket.org/yt_analysis/yt/commits/580c10dc8eaf/
Changeset: 580c10dc8eaf
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-19 21:12:15
Summary: First working version of HTTP particle streaming.
Affected #: 2 files
diff -r 5a128868b387043d9147b927a0a5364f717a0407 -r 580c10dc8eaf5c6bce40f3aeb8e2090b39bcc73c yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -20,6 +20,7 @@
import weakref
import struct
import glob
+import time
import os
from yt.utilities.fortran_utils import read_record
@@ -567,6 +568,7 @@
_particle_mass_name = "Mass"
_particle_coordinates_name = "Coordinates"
_particle_velocity_name = "Velocities"
+ filename_template = ""
def __init__(self, base_url,
data_style = "http_particle_stream",
@@ -588,9 +590,11 @@
# Here's where we're going to grab the JSON index file
hreq = requests.get(self.base_url + "/yt_index.json")
- if hreq.status != 200:
+ if hreq.status_code != 200:
raise RuntimeError
header = json.loads(hreq.content)
+ header['particle_count'] = dict((int(k), header['particle_count'][k])
+ for k in header['particle_count'])
self.parameters = header
# Now we get what we need
@@ -600,6 +604,7 @@
self.domain_dimensions = np.ones(3, "int32") * nz
self.periodicity = (True, True, True)
+ self.current_time = header['current_time']
self.unique_identifier = header.get("unique_identifier", time.time())
self.cosmological_simulation = int(header['cosmological_simulation'])
for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
@@ -612,15 +617,15 @@
length_unit = float(self.parameters['units']['length'])
time_unit = float(self.parameters['units']['time'])
mass_unit = float(self.parameters['units']['mass'])
+ density_unit = mass_unit / length_unit ** 3
velocity_unit = length_unit / time_unit
+ self._unit_base = {}
+ self._unit_base['cm'] = length_unit
+ self._unit_base['s'] = time_unit
+ super(HTTPStreamStaticOutput, self)._set_units()
self.conversion_factors["velocity"] = velocity_unit
self.conversion_factors["mass"] = mass_unit
self.conversion_factors["density"] = density_unit
- self._unit_base['cm'] = length_unit
- self._unit_base['s'] = time_unit
- for u in sec_conversion:
- self.time_units[u] = time_unit * sec_conversion[u]
- super(HTTPStreamStaticOutput, self)._set_units()
@classmethod
def _is_valid(self, *args, **kwargs):
diff -r 5a128868b387043d9147b927a0a5364f717a0407 -r 580c10dc8eaf5c6bce40f3aeb8e2090b39bcc73c yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -539,23 +539,33 @@
return field_offsets
class IOHandlerHTTPStream(BaseIOHandler):
+ _data_style = "http_particle_stream"
+ _vector_fields = ("Coordinates", "Velocity", "Velocities")
+
def __init__(self, pf):
if requests is None:
raise RuntimeError
self._url = pf.base_url
# This should eventually manage the IO and cache it
+ super(IOHandlerHTTPStream, self).__init__(pf)
def _open_stream(self, data_file, field):
# This does not actually stream yet!
ftype, fname = field
s = "%s/%s/%s/%s" % (self._url,
- data_file.http_url,
- ftype, fname)
+ data_file.file_id, ftype, fname)
+ mylog.info("Loading URL %s", s)
resp = requests.get(s)
if resp.status_code != 200:
raise RuntimeError
return resp.content
+ def _identify_fields(self, data_file):
+ f = []
+ for ftype, fname in self.pf.parameters["field_list"]:
+ f.append((str(ftype), str(fname)))
+ return f
+
def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
data_files = set([])
@@ -593,9 +603,9 @@
yield (ptype, field), data
def _initialize_index(self, data_file, regions):
- ptypes = self.pf.json_header["particle_types"]
- pcount = sum(self.pf.json_header["particle_count"][ptype]
- for ptype in ptypes)
+ header = self.pf.parameters
+ ptypes = header["particle_count"][data_file.file_id].keys()
+ pcount = sum(header["particle_count"][data_file.file_id].values())
morton = np.empty(pcount, dtype='uint64')
ind = 0
for ptype in ptypes:
@@ -609,3 +619,6 @@
data_file.pf.domain_right_edge)
ind += c.shape[0]
return morton
+
+ def _count_particles(self, data_file):
+ return self.pf.parameters["particle_count"][data_file.file_id]
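With this changeset the shape of yt_index.json settles: particle counts are keyed by file id (string keys coerced to ints on load), current_time is required, and field_list drives _identify_fields. A hedged example of a header this frontend could consume, written as the equivalent Python dict (every name and value below is a placeholder inferred from the keys the code reads):

yt_index = {
    "domain_left_edge": [0.0, 0.0, 0.0],
    "domain_right_edge": [1.0, 1.0, 1.0],
    "current_time": 0.0,
    "cosmological_simulation": 0,
    "current_redshift": 0.0,
    "omega_lambda": 0.73,
    "omega_matter": 0.27,
    "hubble_constant": 0.70,
    "num_files": 1,
    "units": {"length": 3.0857e24, "time": 3.1557e16, "mass": 1.989e43},
    # Keyed by file id; _count_particles returns this inner dict.
    "particle_count": {"0": {"PartType0": 2097152}},
    "field_list": [["PartType0", "Coordinates"],
                   ["PartType0", "Velocities"],
                   ["PartType0", "Mass"]],
}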
https://bitbucket.org/yt_analysis/yt/commits/f8bbe914dd12/
Changeset: f8bbe914dd12
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-19 21:30:52
Summary: This initializes the units correctly for HTTP particles.
Affected #: 2 files
diff -r 580c10dc8eaf5c6bce40f3aeb8e2090b39bcc73c -r f8bbe914dd12559e403a7ec603160ea25d29e4c5 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -620,8 +620,8 @@
density_unit = mass_unit / length_unit ** 3
velocity_unit = length_unit / time_unit
self._unit_base = {}
- self._unit_base['cm'] = length_unit
- self._unit_base['s'] = time_unit
+ self._unit_base['cm'] = 1.0/length_unit
+ self._unit_base['s'] = 1.0/time_unit
super(HTTPStreamStaticOutput, self)._set_units()
self.conversion_factors["velocity"] = velocity_unit
self.conversion_factors["mass"] = mass_unit
diff -r 580c10dc8eaf5c6bce40f3aeb8e2090b39bcc73c -r f8bbe914dd12559e403a7ec603160ea25d29e4c5 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -597,7 +597,7 @@
for field in field_list:
s = self._open_stream(data_file, (ptype, field))
c = np.frombuffer(s, dtype="float64")
- if field in ("Coordinates", "Velocities"):
+ if field in self._vector_fields:
c.shape = (c.shape[0]/3.0, 3)
data = c[mask, ...]
yield (ptype, field), data
https://bitbucket.org/yt_analysis/yt/commits/db926677a1f4/
Changeset: db926677a1f4
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-19 21:47:42
Summary: Adding a counter for how much data has been streamed.
Affected #: 1 file
diff -r f8bbe914dd12559e403a7ec603160ea25d29e4c5 -r db926677a1f44bced900b0f4f5484f5aac80c4e2 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -547,6 +547,7 @@
raise RuntimeError
self._url = pf.base_url
# This should eventually manage the IO and cache it
+ self.total_bytes = 0
super(IOHandlerHTTPStream, self).__init__(pf)
def _open_stream(self, data_file, field):
@@ -558,6 +559,7 @@
resp = requests.get(s)
if resp.status_code != 200:
raise RuntimeError
+ self.total_bytes += len(resp.content)
return resp.content
def _identify_fields(self, data_file):
https://bitbucket.org/yt_analysis/yt/commits/dfaa8f1d5f96/
Changeset: dfaa8f1d5f96
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 15:32:01
Summary: Merged in MatthewTurk/yt-3.0 (pull request #136)
HTTP loading for particles
Affected #: 4 files
diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r dfaa8f1d5f9622a8db64e1ec39f9d280788d04df yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
if isinstance(arg, types.StringTypes):
if os.path.exists(arg):
valid_file.append(True)
+ elif arg.startswith("http"):
+ valid_file.append(True)
else:
if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
valid_file.append(True)
diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r dfaa8f1d5f9622a8db64e1ec39f9d280788d04df yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
- if not os.path.exists(apath): raise IOError(filename)
+ #if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
if obj._skip_cache is False:
diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r dfaa8f1d5f9622a8db64e1ec39f9d280788d04df yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -20,6 +20,7 @@
import weakref
import struct
import glob
+import time
import os
from yt.utilities.fortran_utils import read_record
@@ -50,6 +51,11 @@
particle_deposition_functions, \
standard_particle_fields
+try:
+ import requests
+ import json
+except ImportError:
+ requests = None
class ParticleFile(object):
def __init__(self, pf, io, filename, file_id):
@@ -550,3 +556,79 @@
def _is_valid(self, *args, **kwargs):
# We do not allow load() of these files.
return False
+
+class HTTPParticleFile(ParticleFile):
+ pass
+
+class HTTPStreamStaticOutput(ParticleStaticOutput):
+ _hierarchy_class = ParticleGeometryHandler
+ _file_class = HTTPParticleFile
+ _fieldinfo_fallback = GadgetFieldInfo
+ _fieldinfo_known = KnownGadgetFields
+ _particle_mass_name = "Mass"
+ _particle_coordinates_name = "Coordinates"
+ _particle_velocity_name = "Velocities"
+ filename_template = ""
+
+ def __init__(self, base_url,
+ data_style = "http_particle_stream",
+ n_ref = 64, over_refine_factor=1):
+ if requests is None:
+ raise RuntimeError
+ self.base_url = base_url
+ self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
+ super(HTTPStreamStaticOutput, self).__init__("", data_style)
+
+ def __repr__(self):
+ return self.base_url
+
+ def _parse_parameter_file(self):
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.parameters["HydroMethod"] = "sph"
+
+ # Here's where we're going to grab the JSON index file
+ hreq = requests.get(self.base_url + "/yt_index.json")
+ if hreq.status_code != 200:
+ raise RuntimeError
+ header = json.loads(hreq.content)
+ header['particle_count'] = dict((int(k), header['particle_count'][k])
+ for k in header['particle_count'])
+ self.parameters = header
+
+ # Now we get what we need
+ self.domain_left_edge = np.array(header['domain_left_edge'], "float64")
+ self.domain_right_edge = np.array(header['domain_right_edge'], "float64")
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
+ self.periodicity = (True, True, True)
+
+ self.current_time = header['current_time']
+ self.unique_identifier = header.get("unique_identifier", time.time())
+ self.cosmological_simulation = int(header['cosmological_simulation'])
+ for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
+ 'hubble_constant'):
+ setattr(self, attr, float(header[attr]))
+
+ self.file_count = header['num_files']
+
+ def _set_units(self):
+ length_unit = float(self.parameters['units']['length'])
+ time_unit = float(self.parameters['units']['time'])
+ mass_unit = float(self.parameters['units']['mass'])
+ density_unit = mass_unit / length_unit ** 3
+ velocity_unit = length_unit / time_unit
+ self._unit_base = {}
+ self._unit_base['cm'] = 1.0/length_unit
+ self._unit_base['s'] = 1.0/time_unit
+ super(HTTPStreamStaticOutput, self)._set_units()
+ self.conversion_factors["velocity"] = velocity_unit
+ self.conversion_factors["mass"] = mass_unit
+ self.conversion_factors["density"] = density_unit
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ if args[0].startswith("http://"):
+ return True
+ return False
diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r dfaa8f1d5f9622a8db64e1ec39f9d280788d04df yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -28,6 +28,11 @@
from yt.geometry.oct_container import _ORDER_MAX
+try:
+ import requests
+except ImportError:
+ requests = None
+
CHUNKSIZE = 10000000
def _get_h5_handle(fn):
@@ -543,3 +548,90 @@
size = self._pdtypes[ptype].itemsize
pos += data_file.total_particles[ptype] * size
return field_offsets
+
+class IOHandlerHTTPStream(BaseIOHandler):
+ _data_style = "http_particle_stream"
+ _vector_fields = ("Coordinates", "Velocity", "Velocities")
+
+ def __init__(self, pf):
+ if requests is None:
+ raise RuntimeError
+ self._url = pf.base_url
+ # This should eventually manage the IO and cache it
+ self.total_bytes = 0
+ super(IOHandlerHTTPStream, self).__init__(pf)
+
+ def _open_stream(self, data_file, field):
+ # This does not actually stream yet!
+ ftype, fname = field
+ s = "%s/%s/%s/%s" % (self._url,
+ data_file.file_id, ftype, fname)
+ mylog.info("Loading URL %s", s)
+ resp = requests.get(s)
+ if resp.status_code != 200:
+ raise RuntimeError
+ self.total_bytes += len(resp.content)
+ return resp.content
+
+ def _identify_fields(self, data_file):
+ f = []
+ for ftype, fname in self.pf.parameters["field_list"]:
+ f.append((str(ftype), str(fname)))
+ return f
+
+ def _read_particle_coords(self, chunks, ptf):
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype in ptf:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ yield ptype, (c[:,0], c[:,1], c[:,2])
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype, field_list in sorted(ptf.items()):
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ mask = selector.select_points(
+ c[:,0], c[:,1], c[:,2])
+ del c
+ if mask is None: continue
+ for field in field_list:
+ s = self._open_stream(data_file, (ptype, field))
+ c = np.frombuffer(s, dtype="float64")
+ if field in self._vector_fields:
+ c.shape = (c.shape[0]/3.0, 3)
+ data = c[mask, ...]
+ yield (ptype, field), data
+
+ def _initialize_index(self, data_file, regions):
+ header = self.pf.parameters
+ ptypes = header["particle_count"][data_file.file_id].keys()
+ pcount = sum(header["particle_count"][data_file.file_id].values())
+ morton = np.empty(pcount, dtype='uint64')
+ ind = 0
+ for ptype in ptypes:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ regions.add_data_file(c, data_file.file_id)
+ morton[ind:ind+c.shape[0]] = compute_morton(
+ c[:,0], c[:,1], c[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ ind += c.shape[0]
+ return morton
+
+ def _count_particles(self, data_file):
+ return self.pf.parameters["particle_count"][data_file.file_id]
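Taken together, the merged pull request makes a URL a first-class load() argument: yt/convenience.py waves any string starting with "http" through its existence check, static_output.py no longer insists the path exist on disk, and HTTPStreamStaticOutput._is_valid claims anything starting with "http://". A hedged usage sketch (the server address is hypothetical):

from yt.mods import load

# The URL skips the isfile checks and is routed to
# HTTPStreamStaticOutput, which fetches <url>/yt_index.json.
pf = load("http://localhost:8080/ds")
print pf.current_time, pf.domain_right_edge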
https://bitbucket.org/yt_analysis/yt/commits/1aa69366f31d/
Changeset: 1aa69366f31d
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-14 02:48:16
Summary: First set of work toward inserting a union-find data structure into the contour
finder. Segfaults.
Affected #: 2 files
diff -r 29e24eb0f722d4e4d907b58ed1398eee652a8bdf -r 1aa69366f31dfd1cf7f8b94dc7f5aa8f89c5b2ea yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -62,7 +62,7 @@
pbar = get_pbar("First pass", len(data_source._grids))
grids = sorted(data_source._grids, key=lambda g: -g.Level)
total_contours = 0
- tree = []
+ tree = amr_utils.ContourTree()
for gi,grid in enumerate(grids):
pbar.update(gi+1)
cm = data_source._get_cut_mask(grid)
@@ -85,10 +85,9 @@
zi = zi_u[cor_order]
while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
pass
- total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
- new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
- tree += zip(new_contours, new_contours)
- tree = set(tree)
+ total_contours += na.unique(grid["tempContours"][grid["tempContours"] > -1]).size
+ new_contours = na.unique(grid["tempContours"][grid["tempContours"] > -1])
+ tree.add_contours(new_contours)
pbar.finish()
pbar = get_pbar("Calculating joins ", len(data_source._grids))
grid_set = set()
@@ -98,29 +97,13 @@
grid_set.update(set(cg._grids))
fd = cg["tempContours"].astype('int64')
boundary_tree = amr_utils.construct_boundary_relationships(fd)
- tree.update(((a, b) for a, b in boundary_tree))
+ tree.add_joins(boundary_tree)
pbar.finish()
- sort_new = np.array(list(tree), dtype='int64')
- mylog.info("Coalescing %s joins", sort_new.shape[0])
- joins = coalesce_join_tree(sort_new)
- #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
- pbar = get_pbar("Joining ", len(joins))
- # This process could and should be done faster
- print "Joining..."
- t1 = time.time()
+ sort_new = na.array(list(tree), dtype='int64')
+ joins = tree.export()
ff = data_source["tempContours"].astype("int64")
amr_utils.update_joins(joins, ff)
data_source["tempContours"] = ff.astype("float64")
- #for i, new in enumerate(sorted(joins.keys())):
- # pbar.update(i)
- # old_set = joins[new]
- # for old in old_set:
- # if old == new: continue
- # i1 = (data_source["tempContours"] == old)
- # data_source["tempContours"][i1] = new
- t2 = time.time()
- print "Finished joining in %0.2e seconds" % (t2-t1)
- pbar.finish()
data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
del data_source.field_data["tempContours"] # Force a reload from the grids
data_source.get_data("tempContours")
diff -r 29e24eb0f722d4e4d907b58ed1398eee652a8bdf -r 1aa69366f31dfd1cf7f8b94dc7f5aa8f89c5b2ea yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -29,49 +29,114 @@
if i0 < i1: return i0
return i1
-cdef extern from "union_find.h":
- ctypedef struct forest_node:
- void *value
- forest_node *parent
- int rank
+cdef struct ContourID
- forest_node* MakeSet(void* value)
- void Union(forest_node* node1, forest_node* node2)
- forest_node* Find(forest_node* node)
+cdef struct ContourID:
+ np.int64_t contour_id
+ int rank
+ ContourID *parent
+ ContourID *next
+ ContourID *prev
-ctypedef struct CellIdentifier:
- np.int64_t hindex
- int level
+cdef ContourID *contour_create(np.int64_t contour_id,
+ ContourID *prev = NULL):
+ node = <ContourID *> malloc(sizeof(ContourID *))
+ node.contour_id = contour_id
+ node.parent = NULL
+ node.rank = 0
+ node.prev = prev
+ if prev != NULL: prev.next = node
+ return node
-cdef class GridContourContainer:
- cdef np.int64_t dims[3]
- cdef np.int64_t start_indices[3]
- cdef forest_node **join_tree
- cdef np.int64_t ncells
+cdef void contour_delete(ContourID *node):
+ if node.prev != NULL: node.prev.next = node.next
+ if node.next != NULL: node.next.prev = node.prev
+ free(node)
- def __init__(self, dimensions, indices):
- cdef int i
- self.ncells = 1
- for i in range(3):
- self.ncells *= dimensions[i]
- self.dims[i] = dimensions[i]
- self.start_indices[i] = indices[i]
- self.join_tree = <forest_node **> malloc(sizeof(forest_node)
- * self.ncells)
- for i in range(self.ncells): self.join_tree[i] = NULL
+cdef ContourID *contour_find(ContourID *node):
+ cdef ContourID *temp, *root
+ root = node
+ while root.parent != NULL:
+ root = root.parent
+ while node.parent != NULL:
+ temp = node.parent
+ node.parent = root
+ node = temp
+ return root
+cdef void contour_union(ContourID *node1, ContourID *node2):
+ if node1.rank > node2.rank:
+ node2.parent = node1
+ elif node2.rank > node1.rank:
+ node1.parent = node2
+ else:
+ node2.parent = node1
+ node1.rank += 1
+
+cdef class ContourTree:
+ cdef ContourID *first
+ cdef ContourID *last
+
+ def clear(self):
+ # Here, we wipe out ALL of our contours, but not the pointers to them
+ cdef ContourID *cur, *next
+ cur = self.first
+ while cur != NULL:
+ next = cur.next
+ free(cur)
+ cur = next
+
+ def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
+ cdef int i, n
+ n = contour_ids.shape[0]
+ cdef ContourID *cur = self.last
+ for i in range(n):
+ cur = contour_create(contour_ids[i], cur)
+ if self.first == NULL: self.first = cur
+ self.last = cur
+
+ def add_contour(self, np.int64_t contour_id):
+ self.last = contour_create(contour_id, self.last)
+
+ def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
+ cdef int i, n
+ cdef np.int64_t cid1, cid2
+ # Okay, this requires lots of iteration, unfortunately
+ cdef ContourID *cur, *root
+ n = join_tree.shape[0]
+ for i in range(n):
+ cid1 = join_tree[n, 0]
+ cid2 = join_tree[n, 1]
+ c1 = c2 = NULL
+ while c1 == NULL and c2 == NULL and cur != NULL:
+ if cur.contour_id == cid1:
+ c1 = contour_find(cur)
+ if cur.contour_id == cid2:
+ c2 = contour_find(cur)
+ cur == cur.next
+ if c1 == NULL or c2 == NULL: raise RuntimeError
+ contour_union(c1, c2)
+
+ def export(self):
+ cdef int n = 0
+ cdef ContourID *cur, *root
+ cur = self.first
+ while cur != NULL:
+ cur = cur.next
+ n += 1
+ cdef np.ndarray[np.int64_t, ndim=2] joins
+ joins = np.empty((n, 2), dtype="int64")
+ n = 0
+ while cur != NULL:
+ root = contour_find(cur)
+ joins[n, 0] = cur.contour_id
+ joins[n, 1] = root.contour_id
+ cur = cur.next
+ n += 1
+ return joins
+
def __dealloc__(self):
- cdef int i
- for i in range(self.ncells):
- if self.join_tree[i] != NULL: free(self.join_tree[i])
- free(self.join_tree)
-
- #def construct_join_tree(self,
- # np.ndarray[np.float64_t, ndim=3] field,
- # np.ndarray[np.bool_t, ndim=3] mask):
- # # This only looks at the components of the grid that are actually
- # # inside this grid -- boundary conditions are handled later.
- # pass
+ self.clear()
#@cython.boundscheck(False)
#@cython.wraparound(False)
@@ -228,16 +293,13 @@
@cython.boundscheck(False)
@cython.wraparound(False)
-def update_joins(joins, np.ndarray[np.int64_t, ndim=1] contour_ids):
- cdef np.int64_t new, old, i, oi
- cdef int n, on
- cdef np.ndarray[np.int64_t, ndim=1] old_set
- #print contour_ids.shape[0]
- n = contour_ids.shape[0]
- for new, old_set in joins:
- #print new
- on = old_set.shape[0]
- for i in range(n):
- for oi in range(on):
- old = old_set[oi]
- if contour_ids[i] == old: contour_ids[i] = new
+def update_joins(np.ndarray[np.int64_t, ndim=2] joins,
+ np.ndarray[np.int64_t, ndim=1] contour_ids):
+ cdef np.int64_t new, old
+ cdef int i, j, nc, nj
+ nc = contour_ids.shape[0]
+ nj = joins.shape[0]
+ for i in range(nc):
+ for j in range(nj):
+ if contour_ids[i] == joins[j,0]:
+ contour_ids[i] = joins[j,1]
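The Cython above is a textbook disjoint-set forest: contour_find is find with path compression, contour_union is union by rank. (Note two bugs in this first commit's add_joins, both fixed in the next changeset: the loop body indexes join_tree[n, ...] instead of join_tree[i, ...], and cur == cur.next compares rather than advances.) A pure-Python rendering of the two core operations, with objects standing in for the C structs:

class ContourNode(object):
    # Python stand-in for the ContourID struct.
    def __init__(self, contour_id):
        self.contour_id = contour_id
        self.parent = None
        self.rank = 0

def contour_find(node):
    # Walk to the root, then point every node on the path at it.
    root = node
    while root.parent is not None:
        root = root.parent
    while node.parent is not None:
        node.parent, node = root, node.parent
    return root

def contour_union(node1, node2):
    # Union by rank keeps the forest shallow.
    if node1.rank > node2.rank:
        node2.parent = node1
    elif node2.rank > node1.rank:
        node1.parent = node2
    else:
        node2.parent = node1
        node1.rank += 1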
https://bitbucket.org/yt_analysis/yt/commits/321623d35d6d/
Changeset: 321623d35d6d
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-14 14:31:36
Summary: This solution runs to completion. Benchmarks next.
Affected #: 2 files
diff -r 1aa69366f31dfd1cf7f8b94dc7f5aa8f89c5b2ea -r 321623d35d6da110b90985bd13c140222be656c0 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -96,10 +96,13 @@
cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
grid_set.update(set(cg._grids))
fd = cg["tempContours"].astype('int64')
- boundary_tree = amr_utils.construct_boundary_relationships(fd)
- tree.add_joins(boundary_tree)
+ bt = amr_utils.construct_boundary_relationships(fd)
+ # This recipe is from josef.pktd on the SciPy mailing list:
+ # http://mail.scipy.org/pipermail/numpy-discussion/2009-August/044664.html
+ c = bt.view([('',bt.dtype)]*bt.shape[1])
+ bt = na.unique(c).view(bt.dtype).reshape(-1,bt.shape[1])
+ tree.add_joins(bt)
pbar.finish()
- sort_new = na.array(list(tree), dtype='int64')
joins = tree.export()
ff = data_source["tempContours"].astype("int64")
amr_utils.update_joins(joins, ff)
diff -r 1aa69366f31dfd1cf7f8b94dc7f5aa8f89c5b2ea -r 321623d35d6da110b90985bd13c140222be656c0 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -40,9 +40,9 @@
cdef ContourID *contour_create(np.int64_t contour_id,
ContourID *prev = NULL):
- node = <ContourID *> malloc(sizeof(ContourID *))
+ node = <ContourID *> malloc(sizeof(ContourID))
node.contour_id = contour_id
- node.parent = NULL
+ node.next = node.parent = NULL
node.rank = 0
node.prev = prev
if prev != NULL: prev.next = node
@@ -56,8 +56,9 @@
cdef ContourID *contour_find(ContourID *node):
cdef ContourID *temp, *root
root = node
- while root.parent != NULL:
+ while root.parent != NULL and root.parent != root:
root = root.parent
+ if root == root.parent: root.parent = NULL
while node.parent != NULL:
temp = node.parent
node.parent = root
@@ -85,12 +86,17 @@
next = cur.next
free(cur)
cur = next
+ self.first = self.last = NULL
+
+ def __init__(self):
+ self.first = self.last = NULL
def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
cdef int i, n
n = contour_ids.shape[0]
cdef ContourID *cur = self.last
for i in range(n):
+ #print i, contour_ids[i]
cur = contour_create(contour_ids[i], cur)
if self.first == NULL: self.first = cur
self.last = cur
@@ -99,31 +105,48 @@
self.last = contour_create(contour_id, self.last)
def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
- cdef int i, n
+ cdef int i, n, ins
cdef np.int64_t cid1, cid2
# Okay, this requires lots of iteration, unfortunately
cdef ContourID *cur, *root
n = join_tree.shape[0]
+ #print "Counting"
+ #print "Checking", self.count()
for i in range(n):
- cid1 = join_tree[n, 0]
- cid2 = join_tree[n, 1]
+ ins = 0
+ cid1 = join_tree[i, 0]
+ cid2 = join_tree[i, 1]
c1 = c2 = NULL
- while c1 == NULL and c2 == NULL and cur != NULL:
+ cur = self.first
+ #print "Looking for ", cid1, cid2
+ while c1 == NULL or c2 == NULL:
if cur.contour_id == cid1:
c1 = contour_find(cur)
if cur.contour_id == cid2:
c2 = contour_find(cur)
- cur == cur.next
- if c1 == NULL or c2 == NULL: raise RuntimeError
- contour_union(c1, c2)
+ ins += 1
+ cur = cur.next
+ if cur == NULL: break
+ if c1 == NULL or c2 == NULL:
+ if c1 == NULL: print " Couldn't find ", cid1
+ if c2 == NULL: print " Couldn't find ", cid2
+ print " Inspected ", ins
+ raise RuntimeError
+ else:
+ contour_union(c1, c2)
- def export(self):
+ def count(self):
cdef int n = 0
- cdef ContourID *cur, *root
- cur = self.first
+ cdef ContourID *cur = self.first
while cur != NULL:
cur = cur.next
n += 1
+ return n
+
+ def export(self):
+ cdef int n = self.count()
+ cdef ContourID *cur, *root
+ cur = self.first
cdef np.ndarray[np.int64_t, ndim=2] joins
joins = np.empty((n, 2), dtype="int64")
n = 0
@@ -303,3 +326,4 @@
for j in range(nj):
if contour_ids[i] == joins[j,0]:
contour_ids[i] = joins[j,1]
+ break
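The row-deduplication recipe credited to josef.pktd works by viewing each row of the (n, 2) int64 join array as a single structured element, so np.unique can compare whole rows at once. A small self-contained demonstration (array contents arbitrary):

import numpy as np

bt = np.array([[3, 1], [2, 1], [3, 1], [2, 1]], dtype="int64")
# View each row as one compound element so np.unique sees rows.
c = bt.view([('', bt.dtype)] * bt.shape[1])
unique_rows = np.unique(c).view(bt.dtype).reshape(-1, bt.shape[1])
print unique_rows    # [[2 1]
                     #  [3 1]]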
https://bitbucket.org/yt_analysis/yt/commits/b9c2d10734ea/
Changeset: b9c2d10734ea
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-14 15:43:57
Summary: Adding optimizations
Affected #: 1 file
diff -r 321623d35d6da110b90985bd13c140222be656c0 -r b9c2d10734ea2a08195baa0136d254e75b6de267 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -91,6 +91,8 @@
def __init__(self):
self.first = self.last = NULL
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
cdef int i, n
n = contour_ids.shape[0]
@@ -104,6 +106,8 @@
def add_contour(self, np.int64_t contour_id):
self.last = contour_create(contour_id, self.last)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
cdef int i, n, ins
cdef np.int64_t cid1, cid2
@@ -143,6 +147,8 @@
n += 1
return n
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
def export(self):
cdef int n = self.count()
cdef ContourID *cur, *root
@@ -161,8 +167,8 @@
def __dealloc__(self):
self.clear()
-#@cython.boundscheck(False)
-#@cython.wraparound(False)
+@cython.boundscheck(False)
+@cython.wraparound(False)
def construct_boundary_relationships(
np.ndarray[dtype=np.int64_t, ndim=3] contour_ids):
# We only look at the boundary and one cell in
https://bitbucket.org/yt_analysis/yt/commits/ec067cef9d6a/
Changeset: ec067cef9d6a
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-14 22:51:47
Summary: Remove a bunch of calls to numpy's unique function (since we have so few final
values) and replace them with linked list checks inside culling routines.
Affected #: 3 files
diff -r b9c2d10734ea2a08195baa0136d254e75b6de267 -r ec067cef9d6ab2c4fefba836a99dd856a253fa75 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -61,7 +61,6 @@
cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
pbar = get_pbar("First pass", len(data_source._grids))
grids = sorted(data_source._grids, key=lambda g: -g.Level)
- total_contours = 0
tree = amr_utils.ContourTree()
for gi,grid in enumerate(grids):
pbar.update(gi+1)
@@ -85,8 +84,7 @@
zi = zi_u[cor_order]
while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
pass
- total_contours += na.unique(grid["tempContours"][grid["tempContours"] > -1]).size
- new_contours = na.unique(grid["tempContours"][grid["tempContours"] > -1])
+ new_contours = tree.cull_candidates(grid["tempContours"])
tree.add_contours(new_contours)
pbar.finish()
pbar = get_pbar("Calculating joins ", len(data_source._grids))
@@ -99,9 +97,8 @@
bt = amr_utils.construct_boundary_relationships(fd)
# This recipe is from josef.pktd on the SciPy mailing list:
# http://mail.scipy.org/pipermail/numpy-discussion/2009-August/044664.html
- c = bt.view([('',bt.dtype)]*bt.shape[1])
- bt = na.unique(c).view(bt.dtype).reshape(-1,bt.shape[1])
- tree.add_joins(bt)
+ joins = tree.cull_joins(bt)
+ tree.add_joins(joins)
pbar.finish()
joins = tree.export()
ff = data_source["tempContours"].astype("int64")
@@ -112,11 +109,14 @@
data_source.get_data("tempContours")
contour_ind = {}
i = 0
- for contour_id in np.unique(data_source["tempContours"]):
- if contour_id == -1: continue
- contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
+ handled = set()
+ for contour_id in data_source["tempContours"]:
+ if contour_id == -1 or contour_id in handled: continue
+ handled.add(contour_id)
+ contour_ind[i] = na.where(data_source["tempContours"] == contour_id)
mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
i += 1
+ print "TREE ENTRIES", tree.count()
mylog.info("Identified %s contours between %0.5e and %0.5e",
len(contour_ind.keys()),min_val,max_val)
for grid in chain(grid_set):
diff -r b9c2d10734ea2a08195baa0136d254e75b6de267 -r ec067cef9d6ab2c4fefba836a99dd856a253fa75 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -985,9 +985,9 @@
from yt.analysis_modules.level_sets.api import identify_contours
cids = identify_contours(self, field, cons[level], mv,
cached_fields)
- for cid, cid_ind in cids.items():
- contours[level][cid] = self.extract_region(cid_ind)
- return cons, contours
+ #for cid, cid_ind in cids.items():
+ # contours[level][cid] = self.extract_region(cid_ind)
+ return #cons, contours
def paint_grids(self, field, value, default_value=None):
"""
diff -r b9c2d10734ea2a08195baa0136d254e75b6de267 -r ec067cef9d6ab2c4fefba836a99dd856a253fa75 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -74,6 +74,32 @@
node2.parent = node1
node1.rank += 1
+cdef struct CandidateContour
+
+cdef struct CandidateContour:
+ np.int64_t contour_id
+ np.int64_t join_id
+ CandidateContour *next
+
+cdef int candidate_contains(CandidateContour *first,
+ np.int64_t contour_id,
+ np.int64_t join_id = -1):
+ while first != NULL:
+ if first.contour_id == contour_id \
+ and first.join_id == join_id: return 1
+ first = first.next
+ return 0
+
+cdef CandidateContour *candidate_add(CandidateContour *first,
+ np.int64_t contour_id,
+ np.int64_t join_id = -1):
+ cdef CandidateContour *node
+ node = <CandidateContour *> malloc(sizeof(CandidateContour))
+ node.contour_id = contour_id
+ node.join_id = join_id
+ node.next = first
+ return node
+
cdef class ContourTree:
cdef ContourID *first
cdef ContourID *last
@@ -106,6 +132,62 @@
def add_contour(self, np.int64_t contour_id):
self.last = contour_create(contour_id, self.last)
+ def cull_candidates(self, np.ndarray[np.int64_t, ndim=3] candidates):
+ # This is a helper function.
+ cdef int i, j, k, ni, nj, nk, nc
+ cdef CandidateContour *first = NULL
+ cdef CandidateContour *temp
+ cdef np.int64_t cid
+ nc = 0
+ ni = candidates.shape[0]
+ nj = candidates.shape[1]
+ nk = candidates.shape[2]
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ cid = candidates[i,j,k]
+ if cid == -1: continue
+ if candidate_contains(first, cid) == 0:
+ nc += 1
+ first = candidate_add(first, cid)
+ cdef np.ndarray[np.int64_t, ndim=1] contours
+ contours = np.empty(nc, dtype="int64")
+ i = 0
+ while first != NULL:
+ contours[i] = first.contour_id
+ i += 1
+ temp = first.next
+ free(first)
+ first = temp
+ return contours
+
+ def cull_joins(self, np.ndarray[np.int64_t, ndim=2] cjoins):
+ cdef int i, j, k, ni, nj, nk, nc
+ cdef CandidateContour *first = NULL
+ cdef CandidateContour *temp
+ cdef np.int64_t cid1, cid2
+ nc = 0
+ ni = cjoins.shape[0]
+ for i in range(ni):
+ cid1 = cjoins[i,0]
+ cid2 = cjoins[i,1]
+ if cid1 == -1: continue
+ if cid2 == -1: continue
+ if candidate_contains(first, cid1, cid2) == 0:
+ nc += 1
+ first = candidate_add(first, cid1, cid2)
+ cdef np.ndarray[np.int64_t, ndim=2] contours
+ contours = np.empty((nc,2), dtype="int64")
+ i = 0
+ while first != NULL:
+ contours[i,0] = first.contour_id
+ contours[i,1] = first.join_id
+ i += 1
+ temp = first.next
+ free(first)
+ first = temp
+ return contours
+
@cython.boundscheck(False)
@cython.wraparound(False)
def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
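cull_candidates and cull_joins replace the np.unique calls with linked-list membership scans, which wins when the number of distinct contour ids is small relative to the array being culled. The same culling expressed in plain Python with a set (a sketch, not the committed code):

import numpy as np

def cull_candidates_py(candidates):
    # Distinct ids from a 3D candidate array, ignoring -1.
    seen = set()
    for cid in candidates.ravel():
        if cid != -1:
            seen.add(cid)
    return np.array(sorted(seen), dtype="int64")

def cull_joins_py(cjoins):
    # Distinct (id, join_id) pairs with no -1 on either side.
    seen = set()
    for cid1, cid2 in cjoins:
        if cid1 != -1 and cid2 != -1:
            seen.add((cid1, cid2))
    return np.array(sorted(seen), dtype="int64").reshape(-1, 2)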
https://bitbucket.org/yt_analysis/yt/commits/f0d7bac4c451/
Changeset: f0d7bac4c451
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-14 23:05:10
Summary: Re-enabling actual extraction.
Affected #: 1 file
diff -r ec067cef9d6ab2c4fefba836a99dd856a253fa75 -r f0d7bac4c45128bc184900c64ad0fde06ec63b64 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -985,9 +985,9 @@
from yt.analysis_modules.level_sets.api import identify_contours
cids = identify_contours(self, field, cons[level], mv,
cached_fields)
- #for cid, cid_ind in cids.items():
- # contours[level][cid] = self.extract_region(cid_ind)
- return #cons, contours
+ for cid, cid_ind in cids.items():
+ contours[level][cid] = self.extract_region(cid_ind)
+ return cons, contours
def paint_grids(self, field, value, default_value=None):
"""
https://bitbucket.org/yt_analysis/yt/commits/6786f57c376c/
Changeset: 6786f57c376c
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-15 18:24:48
Summary: Consolidate a couple routines, which speeds things a bit.
Affected #: 3 files
diff -r f0d7bac4c45128bc184900c64ad0fde06ec63b64 -r 6786f57c376c436828b54b8843e9f4cb7fbf1fc2 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -58,33 +58,23 @@
def identify_contours(data_source, field, min_val, max_val,
cached_fields=None):
- cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
pbar = get_pbar("First pass", len(data_source._grids))
grids = sorted(data_source._grids, key=lambda g: -g.Level)
tree = amr_utils.ContourTree()
+ gct = amr_utils.GridContourTree(min_val, max_val)
+ total_contours = 0
for gi,grid in enumerate(grids):
pbar.update(gi+1)
cm = data_source._get_cut_mask(grid)
- if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
+ if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='int32')
old_field_parameters = grid.field_parameters
grid.field_parameters = data_source.field_parameters
- local_ind = np.where( (grid[field] > min_val)
- & (grid[field] < max_val) & cm )
+ values = grid[field]
grid.field_parameters = old_field_parameters
- if local_ind[0].size == 0: continue
- kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
- grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
- grid["tempContours"][local_ind] = kk[:]
- cur_max_id -= local_ind[0].size
- xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
- cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
- fd_orig = grid["tempContours"].copy()
- xi = xi_u[cor_order]
- yi = yi_u[cor_order]
- zi = zi_u[cor_order]
- while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
- pass
+ grid["tempContours"] = na.zeros(grid.ActiveDimensions, "int64") - 1
+ gct.identify_contours(values, grid["tempContours"], cm, total_contours)
new_contours = tree.cull_candidates(grid["tempContours"])
+ total_contours += new_contours.shape[0]
tree.add_contours(new_contours)
pbar.finish()
pbar = get_pbar("Calculating joins ", len(data_source._grids))
diff -r f0d7bac4c45128bc184900c64ad0fde06ec63b64 -r 6786f57c376c436828b54b8843e9f4cb7fbf1fc2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -985,9 +985,9 @@
from yt.analysis_modules.level_sets.api import identify_contours
cids = identify_contours(self, field, cons[level], mv,
cached_fields)
- for cid, cid_ind in cids.items():
- contours[level][cid] = self.extract_region(cid_ind)
- return cons, contours
+ #for cid, cid_ind in cids.items():
+ # contours[level][cid] = self.extract_region(cid_ind)
+ return #cons, contours
def paint_grids(self, field, value, default_value=None):
"""
diff -r f0d7bac4c45128bc184900c64ad0fde06ec63b64 -r 6786f57c376c436828b54b8843e9f4cb7fbf1fc2 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -21,6 +21,10 @@
cdef extern from "math.h":
double fabs(double x)
+cdef extern from "stdlib.h":
+ # NOTE that size_t might not be int
+ void *alloca(int)
+
cdef inline np.int64_t i64max(np.int64_t i0, np.int64_t i1):
if i0 > i1: return i0
return i1
@@ -33,7 +37,6 @@
cdef struct ContourID:
np.int64_t contour_id
- int rank
ContourID *parent
ContourID *next
ContourID *prev
@@ -41,9 +44,9 @@
cdef ContourID *contour_create(np.int64_t contour_id,
ContourID *prev = NULL):
node = <ContourID *> malloc(sizeof(ContourID))
+ #print "Creating contour with id", contour_id
node.contour_id = contour_id
node.next = node.parent = NULL
- node.rank = 0
node.prev = prev
if prev != NULL: prev.next = node
return node
@@ -56,7 +59,7 @@
cdef ContourID *contour_find(ContourID *node):
cdef ContourID *temp, *root
root = node
- while root.parent != NULL and root.parent != root:
+ while root.parent != NULL and root.parent != root:
root = root.parent
if root == root.parent: root.parent = NULL
while node.parent != NULL:
@@ -66,13 +69,10 @@
return root
cdef void contour_union(ContourID *node1, ContourID *node2):
- if node1.rank > node2.rank:
+ if node1.contour_id < node2.contour_id:
node2.parent = node1
- elif node2.rank > node1.rank:
+ elif node2.contour_id < node1.contour_id:
node1.parent = node2
- else:
- node2.parent = node1
- node1.rank += 1
cdef struct CandidateContour
@@ -249,6 +249,71 @@
def __dealloc__(self):
self.clear()
+cdef class GridContourTree:
+ cdef np.float64_t min_val
+ cdef np.float64_t max_val
+
+ def __init__(self, np.float64_t min_val, np.float64_t max_val):
+ self.min_val = min_val
+ self.max_val = max_val
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def identify_contours(self, np.ndarray[np.float64_t, ndim=3] values,
+ np.ndarray[np.int64_t, ndim=3] contour_ids,
+ np.ndarray[np.int32_t, cast=True, ndim=3] child_mask,
+ np.int64_t start):
+ cdef int i, j, k, ni, nj, nk, offset
+ cdef int off_i, off_j, off_k, oi, ok, oj
+ cdef ContourID *cur = NULL
+ cdef ContourID *c1, *c2
+ cdef np.float64_t v
+ cdef np.int64_t nc
+ ni = values.shape[0]
+ nj = values.shape[1]
+ nk = values.shape[2]
+ nc = 0
+ cdef ContourID **container = <ContourID**> malloc(
+ sizeof(ContourID*)*ni*nj*nk)
+ for i in range(ni*nj*nk): container[i] = NULL
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ if child_mask[i,j,k] == 0: continue
+ v = values[i,j,k]
+ if v < self.min_val or v > self.max_val: continue
+ nc += 1
+ c1 = contour_create(nc + start)
+ cur = container[i*nj*nk + j*nk + k] = c1
+ for oi in range(3):
+ off_i = oi - 1 + i
+ if not (0 <= off_i < ni): continue
+ for oj in range(3):
+ off_j = oj - 1 + j
+ if not (0 <= off_j < nj): continue
+ for ok in range(3):
+ if oi == oj == ok == 1: continue
+ if off_k > k and off_j > j and off_i > i:
+ continue
+ off_k = ok - 1 + k
+ if not (0 <= off_k < nk): continue
+ offset = off_i*nj*nk + off_j*nk + off_k
+ c2 = container[offset]
+ if c2 == NULL: continue
+ c2 = contour_find(c2)
+ contour_union(cur, c2)
+ cur = contour_find(cur)
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ c1 = container[i*nj*nk + j*nk + k]
+ if c1 == NULL: continue
+ cur = c1
+ c1 = contour_find(c1)
+ contour_ids[i,j,k] = c1.contour_id
+ #free(cur)
+ free(container)
+
@cython.boundscheck(False)
@cython.wraparound(False)
def construct_boundary_relationships(
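GridContourTree.identify_contours is a single-pass connected-component labeling over the 26-cell neighborhood: every in-range cell receives a fresh provisional id and is immediately unioned with any already-labeled neighbor, and a second sweep flattens each cell to its root id. A pure-Python sketch of the same pass (it mirrors this changeset's switch from union-by-rank to keeping the smaller contour id as root):

import numpy as np

def identify_contours_py(values, min_val, max_val, start=0):
    parent = {}
    def find(c):
        while parent[c] != c:
            parent[c] = parent[parent[c]]   # path halving
            c = parent[c]
        return c
    def union(c1, c2):
        r1, r2 = find(c1), find(c2)
        if r1 != r2:
            parent[max(r1, r2)] = min(r1, r2)   # smaller id wins
    ni, nj, nk = values.shape
    ids = np.zeros(values.shape, dtype="int64") - 1
    nc = start
    for i in range(ni):
        for j in range(nj):
            for k in range(nk):
                if not (min_val <= values[i, j, k] <= max_val):
                    continue
                nc += 1
                ids[i, j, k] = nc
                parent[nc] = nc
                # Union with every already-labeled cell in the
                # 3x3x3 neighborhood (earlier in scan order).
                for oi in range(max(0, i - 1), min(ni, i + 2)):
                    for oj in range(max(0, j - 1), min(nj, j + 2)):
                        for ok in range(max(0, k - 1), min(nk, k + 2)):
                            if (oi, oj, ok) == (i, j, k):
                                continue
                            if ids[oi, oj, ok] != -1:
                                union(nc, ids[oi, oj, ok])
    # Second sweep: flatten every cell to its root id.
    for i in range(ni):
        for j in range(nj):
            for k in range(nk):
                if ids[i, j, k] != -1:
                    ids[i, j, k] = find(ids[i, j, k])
    return ids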
https://bitbucket.org/yt_analysis/yt/commits/2ad7a1017b32/
Changeset: 2ad7a1017b32
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-15 18:25:10
Summary: Once again, re-enable container returns.
Affected #: 1 file
diff -r 6786f57c376c436828b54b8843e9f4cb7fbf1fc2 -r 2ad7a1017b326a98273059dbfa7e31f64e6c9048 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -985,9 +985,9 @@
from yt.analysis_modules.level_sets.api import identify_contours
cids = identify_contours(self, field, cons[level], mv,
cached_fields)
- #for cid, cid_ind in cids.items():
- # contours[level][cid] = self.extract_region(cid_ind)
- return #cons, contours
+ for cid, cid_ind in cids.items():
+ contours[level][cid] = self.extract_region(cid_ind)
+ return cons, contours
def paint_grids(self, field, value, default_value=None):
"""
https://bitbucket.org/yt_analysis/yt/commits/a3ee4d86ec5c/
Changeset: a3ee4d86ec5c
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-15 18:35:02
Summary: Free memory.
Affected #: 1 file
diff -r 2ad7a1017b326a98273059dbfa7e31f64e6c9048 -r a3ee4d86ec5ce76e91f5c6fcbe21309c0708a535 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -311,7 +311,9 @@
cur = c1
c1 = contour_find(c1)
contour_ids[i,j,k] = c1.contour_id
- #free(cur)
+
+ for i in range(ni*nj*nk):
+ if container[i] != NULL: free(container[i])
free(container)
@cython.boundscheck(False)
https://bitbucket.org/yt_analysis/yt/commits/7e86e997f2dd/
Changeset: 7e86e997f2dd
Branch: yt-3.0
User: MatthewTurk
Date: 2012-03-16 14:08:12
Summary: Minor change to skip -1 contours
Affected #: 1 file
diff -r a3ee4d86ec5ce76e91f5c6fcbe21309c0708a535 -r 7e86e997f2dd27f53265fd640f2710d499606879 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -478,6 +478,7 @@
nc = contour_ids.shape[0]
nj = joins.shape[0]
for i in range(nc):
+ if contour_ids[i] == -1: continue
for j in range(nj):
if contour_ids[i] == joins[j,0]:
contour_ids[i] = joins[j,1]
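update_joins remaps every cell id through the exported join table; the guard added here lets cells outside the value range (id -1) skip the join scan entirely. A mask-based numpy equivalent, assuming (as tree.export() arranges) that each id maps directly to its final root rather than through a chain:

import numpy as np

def update_joins_py(joins, contour_ids):
    # Replace each id found in joins[:, 0] with its root joins[:, 1];
    # -1 never appears in joins, so those cells pass through untouched.
    for old, new in joins:
        contour_ids[contour_ids == old] = new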
https://bitbucket.org/yt_analysis/yt/commits/e0f7dcaf1828/
Changeset: e0f7dcaf1828
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-18 04:08:12
Summary: Initial attempt at a .tiles attribute and a slice_traversal function.
Affected #: 2 files
diff -r 7e86e997f2dd27f53265fd640f2710d499606879 -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -33,6 +33,8 @@
ParallelAnalysisInterface
from yt.utilities.parameter_file_storage import \
ParameterFileStore
+from yt.utilities.amr_kdtree.api import \
+ AMRKDTree
from .derived_quantities import DerivedQuantityCollection
from .field_info_container import \
NeedsGridType, ValidateSpatial
@@ -407,6 +409,14 @@
explicit_fields.append((ftype, fname))
return explicit_fields
+ _tree = None
+
+ @property
+ def tiles(self):
+ if self._tree is not None: return self._tree
+ self._tree = AMRKDTree(self.pf, data_source=self)
+ return self._tree
+
@property
def blocks(self):
for io_chunk in self.chunks([], "io"):
diff -r 7e86e997f2dd27f53265fd640f2710d499606879 -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -183,6 +183,24 @@
for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
yield self.get_brick_data(node)
+ def slice_traverse(self, viewpoint = None):
+ if not hasattr(self.pf.h, "grid"):
+ raise NotImplementedError
+ for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+ grid = self.pf.h.grids[node.grid - self._id_offset]
+ dds = grid.dds
+ gle = grid.LeftEdge
+ nle = get_left_edge(node)
+ nre = get_right_edge(node)
+ li = np.rint((nle-gle)/dds).astype('int32')
+ ri = np.rint((nre-gle)/dds).astype('int32')
+ dims = (ri - li).astype('int32')
+ sl = (slice(li[0], ri[0]),
+ slice(li[1], ri[1]),
+ slice(li[2], ri[2]))
+ gi = grid.get_global_startindex() + li
+ yield grid, node.node_id, (sl, dims, gi)
+
def get_node(self, nodeid):
path = np.binary_repr(nodeid)
depth = 1
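slice_traverse turns each kD-tree leaf back into a slice of its host grid: the node's left and right edges are snapped to the grid's cell width to get integer start/stop indices per axis. The index arithmetic in isolation (all geometry below is hypothetical):

import numpy as np

dds = np.array([0.25, 0.25, 0.25])   # grid cell width
gle = np.array([0.0, 0.0, 0.0])      # grid left edge
nle = np.array([0.25, 0.0, 0.5])     # node left edge
nre = np.array([0.75, 0.5, 1.0])     # node right edge

li = np.rint((nle - gle) / dds).astype("int32")   # start index per axis
ri = np.rint((nre - gle) / dds).astype("int32")   # stop index per axis
dims = ri - li                                    # node shape in cells
sl = (slice(li[0], ri[0]), slice(li[1], ri[1]), slice(li[2], ri[2]))
print sl, dims   # slices (1:3, 0:2, 2:4), dims [2 2 2]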
https://bitbucket.org/yt_analysis/yt/commits/aa71e552b72f/
Changeset: aa71e552b72f
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-18 08:36:45
Summary: Mid-refactor and rewrite of contour finder with kD-tree.
Affected #: 7 files
diff -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 -r aa71e552b72ff0e0789e88091a492744ffa85097 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -14,7 +14,6 @@
#-----------------------------------------------------------------------------
from .contour_finder import \
- coalesce_join_tree, \
identify_contours
from .clump_handling import \
diff -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 -r aa71e552b72ff0e0789e88091a492744ffa85097 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -20,76 +20,29 @@
import yt.utilities.data_point_utilities as data_point_utilities
import yt.utilities.lib as amr_utils
-def coalesce_join_tree(jtree1):
- joins = defaultdict(set)
- nj = jtree1.shape[0]
- for i1 in range(nj):
- current_new = jtree1[i1, 0]
- current_old = jtree1[i1, 1]
- for i2 in range(nj):
- if jtree1[i2, 1] == current_new:
- current_new = max(current_new, jtree1[i2, 0])
- jtree1[i1, 0] = current_new
- for i1 in range(nj):
- joins[jtree1[i1, 0]].update([jtree1[i1, 1], jtree1[i1, 0]])
- updated = -1
- while updated != 0:
- keys = list(reversed(sorted(joins.keys())))
- updated = 0
- for k1 in keys + keys[::-1]:
- if k1 not in joins: continue
- s1 = joins[k1]
- for k2 in keys + keys[::-1]:
- if k2 >= k1: continue
- if k2 not in joins: continue
- s2 = joins[k2]
- if k2 in s1:
- s1.update(joins.pop(k2))
- updated += 1
- elif not s1.isdisjoint(s2):
- s1.update(joins.pop(k2))
- s1.update([k2])
- updated += 1
- tr = []
- for k in joins.keys():
- v = joins.pop(k)
- tr.append((k, np.array(list(v), dtype="int64")))
- return tr
-
def identify_contours(data_source, field, min_val, max_val,
cached_fields=None):
- pbar = get_pbar("First pass", len(data_source._grids))
- grids = sorted(data_source._grids, key=lambda g: -g.Level)
tree = amr_utils.ContourTree()
- gct = amr_utils.GridContourTree(min_val, max_val)
+ gct = amr_utils.TileContourTree(min_val, max_val)
total_contours = 0
- for gi,grid in enumerate(grids):
- pbar.update(gi+1)
- cm = data_source._get_cut_mask(grid)
- if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='int32')
- old_field_parameters = grid.field_parameters
- grid.field_parameters = data_source.field_parameters
- values = grid[field]
- grid.field_parameters = old_field_parameters
- grid["tempContours"] = na.zeros(grid.ActiveDimensions, "int64") - 1
- gct.identify_contours(values, grid["tempContours"], cm, total_contours)
- new_contours = tree.cull_candidates(grid["tempContours"])
+ contours = {}
+ empty_mask = np.ones((1,1,1), dtype="uint8")
+ for (grid, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+ nid = node.node_id
+ values = grid[field][sl].astype("float64")
+ contour_ids = np.zeros(dims, "int64") - 1
+ gct.identify_contours(values, contour_ids, total_contours)
+ new_contours = tree.cull_candidates(contour_ids)
total_contours += new_contours.shape[0]
tree.add_contours(new_contours)
- pbar.finish()
- pbar = get_pbar("Calculating joins ", len(data_source._grids))
- grid_set = set()
- for gi,grid in enumerate(grids):
- pbar.update(gi)
- cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
- grid_set.update(set(cg._grids))
- fd = cg["tempContours"].astype('int64')
- bt = amr_utils.construct_boundary_relationships(fd)
- # This recipe is from josef.pktd on the SciPy mailing list:
- # http://mail.scipy.org/pipermail/numpy-discussion/2009-August/044664.html
- joins = tree.cull_joins(bt)
- tree.add_joins(joins)
- pbar.finish()
+ # Now we can create a partitioned grid with the contours.
+ pg = amr_utils.PartitionedGrid(
+ [contour_ids], empty_mask, g.dds * gi, g.dds * (gi + dims), dims)
+ contours[nid] = (g.Level, pg)
+ trunk = data_source.tiles.tree.trunk
+ amr_utils.link_node_contours(trunk, contours, tree)
+ #joins = tree.cull_joins(bt)
+ #tree.add_joins(joins)
joins = tree.export()
ff = data_source["tempContours"].astype("int64")
amr_utils.update_joins(joins, ff)
@@ -103,7 +56,7 @@
for contour_id in data_source["tempContours"]:
if contour_id == -1 or contour_id in handled: continue
handled.add(contour_id)
- contour_ind[i] = na.where(data_source["tempContours"] == contour_id)
+ contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
i += 1
print "TREE ENTRIES", tree.count()
diff -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 -r aa71e552b72ff0e0789e88091a492744ffa85097 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,6 +18,10 @@
cimport cython
from libc.stdlib cimport malloc, free
+from amr_kdtools cimport _find_node, Node
+from grid_traversal cimport VolumeContainer, PartitionedGrid, \
+ vc_index, vc_pos_index
+
cdef extern from "math.h":
double fabs(double x)
@@ -249,7 +253,7 @@
def __dealloc__(self):
self.clear()
-cdef class GridContourTree:
+cdef class TileContourTree:
cdef np.float64_t min_val
cdef np.float64_t max_val
@@ -261,7 +265,6 @@
@cython.wraparound(False)
def identify_contours(self, np.ndarray[np.float64_t, ndim=3] values,
np.ndarray[np.int64_t, ndim=3] contour_ids,
- np.ndarray[np.int32_t, cast=True, ndim=3] child_mask,
np.int64_t start):
cdef int i, j, k, ni, nj, nk, offset
cdef int off_i, off_j, off_k, oi, ok, oj
@@ -279,7 +282,6 @@
for i in range(ni):
for j in range(nj):
for k in range(nk):
- if child_mask[i,j,k] == 0: continue
v = values[i,j,k]
if v < self.min_val or v > self.max_val: continue
nc += 1
@@ -316,43 +318,97 @@
if container[i] != NULL: free(container[i])
free(container)
-@cython.boundscheck(False)
+#@cython.boundscheck(False)
@cython.wraparound(False)
-def construct_boundary_relationships(
- np.ndarray[dtype=np.int64_t, ndim=3] contour_ids):
- # We only look at the boundary and one cell in
- cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj
+def link_node_contours(Node trunk, contours, ContourTree tree):
+ cdef int n_nodes = max(contours)
+ cdef VolumeContainer **vcs = <VolumeContainer **> malloc(
+ sizeof(VolumeContainer*) * n_nodes)
+ cdef int i
+ cdef PartitionedGrid pg
+ for i in range(n_nodes):
+ v = contours.get(i, None)
+ if v is None:
+ vcs[i] = NULL
+ continue
+ pg = v
+ vcs[i] = pg.container
+ cdef np.ndarray[np.uint8_t] examined = np.zeros(n_nodes, "uint8")
+ for nid, (level, pg) in sorted(contours.items(), key = lambda a: -a[1][0]):
+ construct_boundary_relationships(trunk, tree, nid, examined,
+ vcs)
+
+cdef inline void get_spos(VolumeContainer *vc, int i, int j, int k,
+ int axis, np.float64_t *spos):
+ spos[0] = vc.left_edge[0] + i * vc.dds[0]
+ spos[1] = vc.left_edge[1] + j * vc.dds[1]
+ spos[2] = vc.left_edge[2] + k * vc.dds[2]
+ spos[axis] += 0.5 * vc.dds[axis]
+
+cdef inline int spos_contained(VolumeContainer *vc, np.float64_t *spos):
+ cdef int i
+ for i in range(3):
+ if spos[i] < vc.left_edge[i] or spos[i] > vc.right_edge[i]: return 0
+ return 1
+
+#@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void construct_boundary_relationships(Node trunk, ContourTree tree,
+ np.int64_t nid, np.ndarray[np.uint8_t, ndim=1] examined,
+ VolumeContainer **vcs):
+ # We only look at the boundary and find the nodes next to it.
+ # Contours is a dict, keyed by the node.id.
+ cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
cdef np.int64_t c1, c2
- nx = contour_ids.shape[0]
- ny = contour_ids.shape[1]
- nz = contour_ids.shape[2]
+ cdef Node adj_node
+ cdef VolumeContainer *vc1, *vc0 = vcs[nid]
+ nx = vc0.dims[0]
+ ny = vc0.dims[1]
+ nz = vc0.dims[2]
+ cdef int s = (ny*nx + nx*nz + ny*nz) * 18
# We allocate an array of fixed (maximum) size
- cdef int s = (ny*nx + nx*nz + ny*nz - 2) * 18
- cdef np.ndarray[np.int64_t, ndim=2] tree = np.zeros((s, 2), dtype="int64")
+ cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
cdef int ti = 0
- # First x-pass
+ cdef int index
+ cdef np.float64_t spos[3]
+
+ # First the x-pass
for i in range(ny):
for j in range(nz):
for offset_i in range(3):
oi = offset_i - 1
- if i == 0 and oi == -1: continue
- if i == ny - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == nz - 1 and oj == 1: continue
- c1 = contour_ids[0, i, j]
- c2 = contour_ids[1, i + oi, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[nx-1, i, j]
- c2 = contour_ids[nx-2, i + oi, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
+ # Adjust by -1 in x, then oi and oj in y and z
+ get_spos(vc0, -1, i + oi, j + oj, 0, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_id]
+ if examined[adj_node.node_id] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, 0, i, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ # This is outside our vc
+ get_spos(vc0, nx, i + oi, j + oj, 0, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_id]
+ if examined[adj_node.node_id] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, nx - 1, i, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
# Now y-pass
for i in range(nx):
for j in range(nz):
@@ -362,43 +418,69 @@
if i == nx - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == nz - 1 and oj == 1: continue
- c1 = contour_ids[i, 0, j]
- c2 = contour_ids[i + oi, 1, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[i, ny-1, j]
- c2 = contour_ids[i + oi, ny-2, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
+ get_spos(vc0, i + oi, -1, j + oj, 1, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_id]
+ if examined[adj_node.node_id] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, 0, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ get_spos(vc0, i + oi, ny, j + oj, 1, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_id]
+ if examined[adj_node.node_id] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, ny, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ # Now z-pass
for i in range(nx):
for j in range(ny):
for offset_i in range(3):
oi = offset_i - 1
- if i == 0 and oi == -1: continue
- if i == nx - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == ny - 1 and oj == 1: continue
- c1 = contour_ids[i, j, 0]
- c2 = contour_ids[i + oi, j + oj, 1]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[i, j, nz-1]
- c2 = contour_ids[i + oi, j + oj, nz-2]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- return tree[:ti,:]
+ get_spos(vc0, i + oi, j + oj, -1, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_id]
+ if examined[adj_node.node_id] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, 0)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+
+ get_spos(vc0, i + oi, j + oj, nz, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_id]
+ if examined[adj_node.node_id] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, nz)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ new_joins = tree.cull_joins(tree[:ti,:])
+ tree.add_joins(new_joins)
cdef inline int are_neighbors(
np.float64_t x1, np.float64_t y1, np.float64_t z1,
diff -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 -r aa71e552b72ff0e0789e88091a492744ffa85097 yt/utilities/lib/amr_kdtools.pxd
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -0,0 +1,38 @@
+"""
+AMR kD-Tree Cython Tools
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+
+cdef struct Split:
+ int dim
+ np.float64_t pos
+
+cdef class Node
+
+cdef class Node:
+ cdef public Node left
+ cdef public Node right
+ cdef public Node parent
+ cdef public int grid
+ cdef public np.int64_t node_id
+ cdef np.float64_t left_edge[3]
+ cdef np.float64_t right_edge[3]
+ cdef public data
+ cdef Split * split
+ cdef int level
+
+cdef int point_in_node(Node node, np.ndarray[np.float64_t, ndim=1] point)
+cdef Node _find_node(Node node, np.float64_t *point)
+cdef int _kd_is_leaf(Node node)
diff -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 -r aa71e552b72ff0e0789e88091a492744ffa85097 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -25,25 +25,11 @@
DEF Nch = 4
-cdef struct Split:
- int dim
- np.float64_t pos
-
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cdef class Node:
- cdef public Node left
- cdef public Node right
- cdef public Node parent
- cdef public int grid
- cdef public np.int64_t node_id
- cdef np.float64_t left_edge[3]
- cdef np.float64_t right_edge[3]
- cdef public data
- cdef Split * split
-
def __cinit__(self,
Node parent,
Node left,
@@ -152,11 +138,11 @@
def kd_traverse(Node trunk, viewpoint=None):
if viewpoint is None:
for node in depth_traverse(trunk):
- if kd_is_leaf(node) and node.grid != -1:
+ if _kd_is_leaf(node) == 1 and node.grid != -1:
yield node
else:
for node in viewpoint_traverse(trunk, viewpoint):
- if kd_is_leaf(node) and node.grid != -1:
+ if _kd_is_leaf(node) == 1 and node.grid != -1:
yield node
@cython.boundscheck(False)
@@ -172,7 +158,7 @@
if not should_i_build(node, rank, size):
return
- if kd_is_leaf(node):
+ if _kd_is_leaf(node) == 1:
insert_grid(node, gle, gre, gid, rank, size)
else:
less_id = gle[node.split.dim] < node.split.pos
@@ -295,7 +281,7 @@
if not should_i_build(node, rank, size):
return
- if kd_is_leaf(node):
+ if _kd_is_leaf(node) == 1:
insert_grids(node, ngrids, gles, gres, gids, rank, size)
return
@@ -766,11 +752,16 @@
assert has_l_child == has_r_child
return has_l_child
+cdef int _kd_is_leaf(Node node):
+ if node.left is None or node.right is None:
+ return 1
+ return 0
+
def step_depth(Node current, Node previous):
'''
Takes a single step in the depth-first traversal
'''
- if kd_is_leaf(current): # At a leaf, move back up
+ if _kd_is_leaf(current) == 1: # At a leaf, move back up
previous = current
current = current.parent
@@ -862,7 +853,7 @@
Takes a single step in the viewpoint based traversal. Always
goes to the node furthest away from viewpoint first.
'''
- if kd_is_leaf(current): # At a leaf, move back up
+ if _kd_is_leaf(current) == 1: # At a leaf, move back up
previous = current
current = current.parent
elif current.split.dim is None: # This is a dead node
@@ -913,6 +904,13 @@
inside *= node.right_edge[i] > point[i]
return inside
+cdef Node _find_node(Node node, np.float64_t *point):
+ while _kd_is_leaf(node) == 0:
+ if point[node.split.dim] < node.split.pos:
+ node = node.left
+ else:
+ node = node.right
+ return node
def find_node(Node node,
np.ndarray[np.float64_t, ndim=1] point):
@@ -920,12 +918,5 @@
Find the AMRKDTree node enclosing a position
"""
assert(point_in_node(node, point))
- while not kd_is_leaf(node):
- if point[node.split.dim] < node.split.pos:
- node = node.left
- else:
- node = node.right
- return node
+ return _find_node(node, <np.float64_t *> point.data)
-
-
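For orientation, the _find_node helper added above is a standard iterative kD-tree descent: follow the split plane left or right until a leaf is reached (a node missing either child counts as a leaf, per _kd_is_leaf). A minimal pure-Python sketch of the same walk; KDNode, split_dim, and split_pos are illustrative stand-ins for the Cython Node class and its Split struct:

    class KDNode(object):
        # Illustrative stand-in for the Cython Node class above.
        def __init__(self, split_dim=None, split_pos=None,
                     left=None, right=None):
            self.split_dim = split_dim
            self.split_pos = split_pos
            self.left = left
            self.right = right

    def find_leaf(node, point):
        # Mirrors _find_node: descend while both children exist.
        while node.left is not None and node.right is not None:
            if point[node.split_dim] < node.split_pos:
                node = node.left
            else:
                node = node.right
        return node

    root = KDNode(0, 0.5, KDNode(), KDNode())
    leaf = find_leaf(root, (0.25, 0.0, 0.0))   # descends to root.left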
diff -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 -r aa71e552b72ff0e0789e88091a492744ffa85097 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -17,6 +17,7 @@
import numpy as np
cimport numpy as np
cimport cython
+cimport kdtree_utils
cdef struct VolumeContainer:
int n_fields
@@ -29,6 +30,20 @@
np.float64_t idds[3]
int dims[3]
+cdef class PartitionedGrid:
+ cdef public object my_data
+ cdef public object source_mask
+ cdef public object LeftEdge
+ cdef public object RightEdge
+ cdef public int parent_grid_id
+ cdef VolumeContainer *container
+ cdef kdtree_utils.kdtree *star_list
+ cdef np.float64_t star_er
+ cdef np.float64_t star_sigma_num
+ cdef np.float64_t star_coeff
+ cdef void get_vector_field(self, np.float64_t pos[3],
+ np.float64_t *vel, np.float64_t *vel_mag)
+
ctypedef void sample_function(
VolumeContainer *vc,
np.float64_t v_pos[3],
@@ -45,3 +60,12 @@
void *data,
np.float64_t *return_t = *,
np.float64_t enter_t = *) nogil
+
+cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
+ return (i*vc.dims[1]+j)*vc.dims[2]+k
+
+cdef inline int vc_pos_index(VolumeContainer *vc, np.float64_t *spos):
+ cdef int i, index[3]
+ for i in range(3):
+ index[i] = <int> ((spos[i] - vc.left_edge[i]) * vc.idds[i])
+ return vc_index(vc, index[0], index[1], index[2])
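For reference, the two inlines added here define the indexing convention used by every volume container: vc_index is a row-major flattening of (i, j, k), and vc_pos_index first truncates a physical position to its containing cell via the inverse cell widths (idds). A NumPy sketch of the same arithmetic, with hypothetical _py names:

    import numpy as np

    def vc_index_py(dims, i, j, k):
        # Row-major flattening, as in the vc_index inline above.
        return (i * dims[1] + j) * dims[2] + k

    def vc_pos_index_py(left_edge, idds, dims, spos):
        # idds is 1/dds; integer truncation picks the containing cell.
        idx = ((np.asarray(spos) - np.asarray(left_edge)) *
               np.asarray(idds)).astype(int)
        return vc_index_py(dims, idx[0], idx[1], idx[2])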
diff -r e0f7dcaf18287c9418c5ff64b25b9f705d250073 -r aa71e552b72ff0e0789e88091a492744ffa85097 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -16,7 +16,6 @@
import numpy as np
cimport numpy as np
cimport cython
-cimport kdtree_utils
#cimport healpix_interface
from libc.stdlib cimport malloc, free, abs
from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -58,16 +57,6 @@
void *data) nogil
cdef class PartitionedGrid:
- cdef public object my_data
- cdef public object source_mask
- cdef public object LeftEdge
- cdef public object RightEdge
- cdef public int parent_grid_id
- cdef VolumeContainer *container
- cdef kdtree_utils.kdtree *star_list
- cdef np.float64_t star_er
- cdef np.float64_t star_sigma_num
- cdef np.float64_t star_coeff
@cython.boundscheck(False)
@cython.wraparound(False)
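Taken together, the ContourFinding.pyx changes in this changeset replace the old ghost-zone boundary pass with kD-tree lookups: for each face cell of a tile, get_spos computes a sample point half a cell outside the face, _find_node descends to the neighboring leaf, and a join is recorded between the contour IDs on either side of the boundary. A pure-Python sketch of the sampling and containment test (all names illustrative, not yt API):

    import numpy as np

    def sample_position(left_edge, dds, i, j, k, axis):
        # As in get_spos: cell-corner position, nudged half a cell along
        # `axis`, so e.g. i = -1 lands inside the neighboring tile.
        spos = np.asarray(left_edge) + np.array([i, j, k]) * np.asarray(dds)
        spos[axis] += 0.5 * dds[axis]
        return spos

    def contained(left_edge, right_edge, spos):
        # As in spos_contained: is the sample inside this container?
        return bool(np.all(spos >= left_edge) and np.all(spos <= right_edge))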
https://bitbucket.org/yt_analysis/yt/commits/820e80956cfe/
Changeset: 820e80956cfe
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-18 09:12:23
Summary: Joins now in-place.
Affected #: 5 files
diff -r aa71e552b72ff0e0789e88091a492744ffa85097 -r 820e80956cfe7965d70d3b309208c264a52cf207 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -27,20 +27,26 @@
total_contours = 0
contours = {}
empty_mask = np.ones((1,1,1), dtype="uint8")
- for (grid, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+ node_ids = []
+ for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+ node.node_ind = len(node_ids)
nid = node.node_id
- values = grid[field][sl].astype("float64")
+ node_ids.append(nid)
+ values = g[field][sl].astype("float64")
contour_ids = np.zeros(dims, "int64") - 1
gct.identify_contours(values, contour_ids, total_contours)
new_contours = tree.cull_candidates(contour_ids)
total_contours += new_contours.shape[0]
tree.add_contours(new_contours)
# Now we can create a partitioned grid with the contours.
- pg = amr_utils.PartitionedGrid(
- [contour_ids], empty_mask, g.dds * gi, g.dds * (gi + dims), dims)
- contours[nid] = (g.Level, pg)
+ pg = amr_utils.PartitionedGrid(g.id,
+ [contour_ids.view("float64")],
+ empty_mask, g.dds * gi, g.dds * (gi + dims),
+ dims.astype("int64"))
+ contours[nid] = (g.Level, node.node_ind, pg)
+ node_ids = np.array(node_ids)
trunk = data_source.tiles.tree.trunk
- amr_utils.link_node_contours(trunk, contours, tree)
+ amr_utils.link_node_contours(trunk, contours, tree, node_ids)
#joins = tree.cull_joins(bt)
#tree.add_joins(joins)
joins = tree.export()
diff -r aa71e552b72ff0e0789e88091a492744ffa85097 -r 820e80956cfe7965d70d3b309208c264a52cf207 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -199,7 +199,7 @@
slice(li[1], ri[1]),
slice(li[2], ri[2]))
gi = grid.get_global_startindex() + li
- yield grid, node.node_id, (sl, dims, gi)
+ yield grid, node, (sl, dims, gi)
def get_node(self, nodeid):
path = np.binary_repr(nodeid)
diff -r aa71e552b72ff0e0789e88091a492744ffa85097 -r 820e80956cfe7965d70d3b309208c264a52cf207 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -320,23 +320,23 @@
#@cython.boundscheck(False)
@cython.wraparound(False)
-def link_node_contours(Node trunk, contours, ContourTree tree):
- cdef int n_nodes = max(contours)
+def link_node_contours(Node trunk, contours, ContourTree tree,
+ np.ndarray[np.int64_t, ndim=1] node_ids):
+ cdef int n_nodes = node_ids.shape[0]
+ cdef np.int64_t node_ind
cdef VolumeContainer **vcs = <VolumeContainer **> malloc(
sizeof(VolumeContainer*) * n_nodes)
cdef int i
cdef PartitionedGrid pg
for i in range(n_nodes):
- v = contours.get(i, None)
- if v is None:
- vcs[i] = NULL
- continue
- pg = v
+ pg = contours[node_ids[i]][2]
vcs[i] = pg.container
cdef np.ndarray[np.uint8_t] examined = np.zeros(n_nodes, "uint8")
- for nid, (level, pg) in sorted(contours.items(), key = lambda a: -a[1][0]):
- construct_boundary_relationships(trunk, tree, nid, examined,
- vcs)
+ for nid, (level, node_ind, pg) in sorted(contours.items(),
+ key = lambda a: -a[1][0]):
+ construct_boundary_relationships(trunk, tree, node_ind,
+ examined, vcs, node_ids)
+ examined[node_ind] = 1
cdef inline void get_spos(VolumeContainer *vc, int i, int j, int k,
int axis, np.float64_t *spos):
@@ -355,7 +355,8 @@
@cython.wraparound(False)
cdef void construct_boundary_relationships(Node trunk, ContourTree tree,
np.int64_t nid, np.ndarray[np.uint8_t, ndim=1] examined,
- VolumeContainer **vcs):
+ VolumeContainer **vcs,
+ np.ndarray[np.int64_t, ndim=1] node_ids):
# We only look at the boundary and find the nodes next to it.
# Contours is a dict, keyed by the node.id.
cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
@@ -382,8 +383,8 @@
# Adjust by -1 in x, then oi and oj in y and z
get_spos(vc0, -1, i + oi, j + oj, 0, spos)
adj_node = _find_node(trunk, spos)
- vc1 = vcs[adj_node.node_id]
- if examined[adj_node.node_id] == 0 and \
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
spos_contained(vc1, spos):
# This is outside our VC, as 0 is a boundary layer
index = vc_index(vc0, 0, i, j)
@@ -397,8 +398,8 @@
# This is outside our vc
get_spos(vc0, nx, i + oi, j + oj, 0, spos)
adj_node = _find_node(trunk, spos)
- vc1 = vcs[adj_node.node_id]
- if examined[adj_node.node_id] == 0 and \
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
spos_contained(vc1, spos):
# This is outside our VC, as 0 is a boundary layer
index = vc_index(vc0, nx - 1, i, j)
@@ -420,8 +421,8 @@
oj = offset_j - 1
get_spos(vc0, i + oi, -1, j + oj, 1, spos)
adj_node = _find_node(trunk, spos)
- vc1 = vcs[adj_node.node_id]
- if examined[adj_node.node_id] == 0 and \
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
spos_contained(vc1, spos):
# This is outside our VC, as 0 is a boundary layer
index = vc_index(vc0, i, 0, j)
@@ -432,13 +433,14 @@
joins[ti,0] = i64max(c1,c2)
joins[ti,1] = i64min(c1,c2)
ti += 1
+
get_spos(vc0, i + oi, ny, j + oj, 1, spos)
adj_node = _find_node(trunk, spos)
- vc1 = vcs[adj_node.node_id]
- if examined[adj_node.node_id] == 0 and \
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
spos_contained(vc1, spos):
# This is outside our VC, as 0 is a boundary layer
- index = vc_index(vc0, i, ny, j)
+ index = vc_index(vc0, i, ny - 1, j)
c1 = (<np.int64_t*>vc0.data[0])[index]
index = vc_pos_index(vc1, spos)
c2 = (<np.int64_t*>vc1.data[0])[index]
@@ -456,30 +458,35 @@
oj = offset_j - 1
get_spos(vc0, i + oi, j + oj, -1, 2, spos)
adj_node = _find_node(trunk, spos)
- vc1 = vcs[adj_node.node_id]
- if examined[adj_node.node_id] == 0 and \
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
spos_contained(vc1, spos):
# This is outside our VC, as 0 is a boundary layer
index = vc_index(vc0, i, j, 0)
c1 = (<np.int64_t*>vc0.data[0])[index]
index = vc_pos_index(vc1, spos)
c2 = (<np.int64_t*>vc1.data[0])[index]
-
- get_spos(vc0, i + oi, j + oj, nz, 2, spos)
- adj_node = _find_node(trunk, spos)
- vc1 = vcs[adj_node.node_id]
- if examined[adj_node.node_id] == 0 and \
- spos_contained(vc1, spos):
- # This is outside our VC, as 0 is a boundary layer
- index = vc_index(vc0, i, j, nz)
- c1 = (<np.int64_t*>vc0.data[0])[index]
- index = vc_pos_index(vc1, spos)
- c2 = (<np.int64_t*>vc1.data[0])[index]
if c1 > -1 and c2 > -1:
joins[ti,0] = i64max(c1,c2)
joins[ti,1] = i64min(c1,c2)
ti += 1
- new_joins = tree.cull_joins(tree[:ti,:])
+
+ get_spos(vc0, i + oi, j + oj, nz, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, nz - 1)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ if ti == 0: return
+ new_joins = tree.cull_joins(joins[:ti,:])
tree.add_joins(new_joins)
cdef inline int are_neighbors(
diff -r aa71e552b72ff0e0789e88091a492744ffa85097 -r 820e80956cfe7965d70d3b309208c264a52cf207 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -27,6 +27,7 @@
cdef public Node parent
cdef public int grid
cdef public np.int64_t node_id
+ cdef public np.int64_t node_ind
cdef np.float64_t left_edge[3]
cdef np.float64_t right_edge[3]
cdef public data
diff -r aa71e552b72ff0e0789e88091a492744ffa85097 -r 820e80956cfe7965d70d3b309208c264a52cf207 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -65,7 +65,9 @@
["yt/utilities/lib/ContourFinding.pyx",
"yt/utilities/lib/union_find.c"],
include_dirs=["yt/utilities/lib/"],
- libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+ libraries=["m"],
+ depends=["yt/utilities/lib/fp_utils.pxd",
+ "yt/utilities/lib/amr_kdtools.pxd"])
config.add_extension("DepthFirstOctree",
["yt/utilities/lib/DepthFirstOctree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
https://bitbucket.org/yt_analysis/yt/commits/596ab5b5efb1/
Changeset: 596ab5b5efb1
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-18 09:32:00
Summary: Caching is broken with removal of hdf5_light_reader.
Affected #: 1 file
diff -r 820e80956cfe7965d70d3b309208c264a52cf207 -r 596ab5b5efb121c029724c72e135f5b59a802e6e yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -670,7 +670,7 @@
if len(self.queue) == 0: raise StopIteration
chunk = YTDataChunk(None, "cache", self.queue, cache=False)
self.cache = self.geometry_handler.io._read_chunk_data(
- chunk, self.preload_fields)
+ chunk, self.preload_fields) or {}
g = self.queue.pop(0)
g._initialize_cache(self.cache.pop(g.id, {}))
return g
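The `or {}` guard is the whole fix: with hdf5_light_reader removed, _read_chunk_data can return None, and the following self.cache.pop(g.id, {}) would then raise AttributeError. The pattern in isolation (read_chunk_data is a hypothetical stand-in):

    def read_chunk_data():
        # May return None now that the old preloading reader is gone.
        return None

    cache = read_chunk_data() or {}
    print cache.pop(42, {})   # -> {}; safe even with nothing preloaded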
https://bitbucket.org/yt_analysis/yt/commits/2700da832c93/
Changeset: 2700da832c93
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-18 09:37:00
Summary: Re-writing clump finding to use kD-tree, adding contour field.
Affected #: 3 files
diff -r 596ab5b5efb121c029724c72e135f5b59a802e6e -r 2700da832c935801170085bba757c89a9f3f6e3c yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -43,32 +43,21 @@
[contour_ids.view("float64")],
empty_mask, g.dds * gi, g.dds * (gi + dims),
dims.astype("int64"))
- contours[nid] = (g.Level, node.node_ind, pg)
+ contours[nid] = (g.Level, node.node_ind, pg, sl)
node_ids = np.array(node_ids)
trunk = data_source.tiles.tree.trunk
+ mylog.info("Linking node (%s) contours.", len(contours))
amr_utils.link_node_contours(trunk, contours, tree, node_ids)
#joins = tree.cull_joins(bt)
#tree.add_joins(joins)
joins = tree.export()
- ff = data_source["tempContours"].astype("int64")
- amr_utils.update_joins(joins, ff)
- data_source["tempContours"] = ff.astype("float64")
- data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
- del data_source.field_data["tempContours"] # Force a reload from the grids
- data_source.get_data("tempContours")
- contour_ind = {}
- i = 0
- handled = set()
- for contour_id in data_source["tempContours"]:
- if contour_id == -1 or contour_id in handled: continue
- handled.add(contour_id)
- contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
- mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
- i += 1
- print "TREE ENTRIES", tree.count()
- mylog.info("Identified %s contours between %0.5e and %0.5e",
- len(contour_ind.keys()),min_val,max_val)
- for grid in chain(grid_set):
- grid.field_data.pop("tempContours", None)
- del data_source.field_data["tempContours"]
- return contour_ind
+ contour_ids = defaultdict(list)
+ pbar = get_pbar("Updating joins ... ", len(contours))
+ for i, nid in enumerate(sorted(contours)):
+ level, node_ind, pg, sl = contours[nid]
+ ff = pg.my_data[0].view("int64")
+ amr_utils.update_joins(joins, ff)
+ contour_ids[pg.parent_grid_id].append((sl, ff))
+ pbar.update(i)
+ pbar.finish()
+ return dict(contour_ids.items())
diff -r 596ab5b5efb121c029724c72e135f5b59a802e6e -r 2700da832c935801170085bba757c89a9f3f6e3c yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -582,12 +582,17 @@
units=r"\rm{s}^{-1}")
def _Contours(field, data):
- return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
- display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
- validators=[ValidateSpatial(0), ValidateGridType()],
- take_log=False, display_field=False)
+ fd = data.get_field_parameter("contour_slices")
+ vals = data["Ones"] * -1
+ if fd is None or fd == 0.0:
+ return vals
+ for sl, v in fd.get(data.id, []):
+ vals[sl] = v
+ return vals
+add_field("Contours", validators=[ValidateSpatial(0)],
+ take_log=False,
+ display_field=False,
+ function=_Contours)
def obtain_velocities(data):
return obtain_rv_vec(data)
diff -r 596ab5b5efb121c029724c72e135f5b59a802e6e -r 2700da832c935801170085bba757c89a9f3f6e3c yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -332,8 +332,8 @@
pg = contours[node_ids[i]][2]
vcs[i] = pg.container
cdef np.ndarray[np.uint8_t] examined = np.zeros(n_nodes, "uint8")
- for nid, (level, node_ind, pg) in sorted(contours.items(),
- key = lambda a: -a[1][0]):
+ for nid, cinfo in sorted(contours.items(), key = lambda a: -a[1][0]):
+ level, node_ind, pg, sl = cinfo
construct_boundary_relationships(trunk, tree, node_ind,
examined, vcs, node_ids)
examined[node_ind] = 1
@@ -561,14 +561,16 @@
@cython.boundscheck(False)
@cython.wraparound(False)
def update_joins(np.ndarray[np.int64_t, ndim=2] joins,
- np.ndarray[np.int64_t, ndim=1] contour_ids):
+ np.ndarray[np.int64_t, ndim=3] contour_ids):
cdef np.int64_t new, old
- cdef int i, j, nc, nj
- nc = contour_ids.shape[0]
+ cdef int i, j, nj
+ cdef int ci, cj, ck
nj = joins.shape[0]
- for i in range(nc):
- if contour_ids[i] == -1: continue
- for j in range(nj):
- if contour_ids[i] == joins[j,0]:
- contour_ids[i] = joins[j,1]
- break
+ for ci in range(contour_ids.shape[0]):
+ for cj in range(contour_ids.shape[1]):
+ for ck in range(contour_ids.shape[2]):
+ if contour_ids[ci,cj,ck] == -1: continue
+ for j in range(nj):
+ if contour_ids[ci,cj,ck] == joins[j,0]:
+ contour_ids[ci,cj,ck] = joins[j,1]
+ break
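update_joins now relabels a full 3-D contour-ID block in place against the join table rather than a flattened 1-D array. A vectorized NumPy sketch of the same relabeling, assuming each old ID occurs in at most one join row (the break-on-first-match in the Cython loop makes the same assumption):

    import numpy as np

    def update_joins_py(joins, contour_ids):
        # joins is (nj, 2): column 0 holds the old id, column 1 the survivor.
        out = contour_ids.copy()
        for old, new in joins:
            out[contour_ids == old] = new
        return out

    ids = np.array([[[-1, 5], [7, 5]]], dtype="int64")
    print update_joins_py(np.array([[7, 5]]), ids)   # 7 is joined into 5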
https://bitbucket.org/yt_analysis/yt/commits/b54f6a78248b/
Changeset: b54f6a78248b
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-18 10:47:13
Summary: final_joins and a field_parameter_state contextmanager.
Affected #: 4 files
diff -r 2700da832c935801170085bba757c89a9f3f6e3c -r b54f6a78248b4d8839222878964544137f376045 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -48,16 +48,20 @@
trunk = data_source.tiles.tree.trunk
mylog.info("Linking node (%s) contours.", len(contours))
amr_utils.link_node_contours(trunk, contours, tree, node_ids)
+ mylog.info("Linked.")
#joins = tree.cull_joins(bt)
#tree.add_joins(joins)
joins = tree.export()
contour_ids = defaultdict(list)
pbar = get_pbar("Updating joins ... ", len(contours))
+ final_joins = np.unique(joins[:,1])
for i, nid in enumerate(sorted(contours)):
level, node_ind, pg, sl = contours[nid]
ff = pg.my_data[0].view("int64")
- amr_utils.update_joins(joins, ff)
+ amr_utils.update_joins(joins, ff, final_joins)
contour_ids[pg.parent_grid_id].append((sl, ff))
pbar.update(i)
pbar.finish()
- return dict(contour_ids.items())
+ rv = dict()
+ rv.update(contour_ids)
+ return rv
diff -r 2700da832c935801170085bba757c89a9f3f6e3c -r b54f6a78248b4d8839222878964544137f376045 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -278,7 +278,8 @@
chunk_fields, "io")):
mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
get_memory_usage()/1024.)
- self._handle_chunk(chunk, fields, tree)
+ with chunk._field_parameter_state(self.field_parameters):
+ self._handle_chunk(chunk, fields, tree)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
merge_style = -1
diff -r 2700da832c935801170085bba757c89a9f3f6e3c -r b54f6a78248b4d8839222878964544137f376045 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -367,6 +367,13 @@
return s
@contextmanager
+ def _field_parameter_state(self, field_parameters):
+ old_field_parameters = self.field_parameters
+ self.field_parameters = field_parameters
+ yield
+ self.field_parameters = old_field_parameters
+
+ @contextmanager
def _field_type_state(self, ftype, finfo, obj = None):
if obj is None: obj = self
old_particle_type = obj._current_particle_type
diff -r 2700da832c935801170085bba757c89a9f3f6e3c -r b54f6a78248b4d8839222878964544137f376045 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -318,7 +318,7 @@
if container[i] != NULL: free(container[i])
free(container)
-#@cython.boundscheck(False)
+@cython.boundscheck(False)
@cython.wraparound(False)
def link_node_contours(Node trunk, contours, ContourTree tree,
np.ndarray[np.int64_t, ndim=1] node_ids):
@@ -351,7 +351,7 @@
if spos[i] < vc.left_edge[i] or spos[i] > vc.right_edge[i]: return 0
return 1
-#@cython.boundscheck(False)
+@cython.boundscheck(False)
@cython.wraparound(False)
cdef void construct_boundary_relationships(Node trunk, ContourTree tree,
np.int64_t nid, np.ndarray[np.uint8_t, ndim=1] examined,
@@ -561,11 +561,13 @@
@cython.boundscheck(False)
@cython.wraparound(False)
def update_joins(np.ndarray[np.int64_t, ndim=2] joins,
- np.ndarray[np.int64_t, ndim=3] contour_ids):
+ np.ndarray[np.int64_t, ndim=3] contour_ids,
+ np.ndarray[np.int64_t, ndim=1] final_joins):
cdef np.int64_t new, old
- cdef int i, j, nj
+ cdef int i, j, nj, nf
cdef int ci, cj, ck
nj = joins.shape[0]
+ nf = final_joins.shape[0]
for ci in range(contour_ids.shape[0]):
for cj in range(contour_ids.shape[1]):
for ck in range(contour_ids.shape[2]):
@@ -574,3 +576,7 @@
if contour_ids[ci,cj,ck] == joins[j,0]:
contour_ids[ci,cj,ck] = joins[j,1]
break
+ for j in range(nf):
+ if contour_ids[ci,cj,ck] == final_joins[j]:
+ contour_ids[ci,cj,ck] = j
+ break
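The _field_parameter_state contextmanager swaps a data object's field parameters for the duration of a with-block and restores the originals on exit. A standalone sketch of the same pattern, with a try/finally added for exception safety that the committed version omits:

    from contextlib import contextmanager

    class DataObject(object):
        def __init__(self):
            self.field_parameters = {"center": None}

        @contextmanager
        def _field_parameter_state(self, field_parameters):
            old = self.field_parameters
            self.field_parameters = field_parameters
            try:
                yield
            finally:
                self.field_parameters = old

    obj = DataObject()
    with obj._field_parameter_state({"contour_slices": {}}):
        pass   # field evaluation here sees the temporary parameters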
https://bitbucket.org/yt_analysis/yt/commits/bfd3c32384ff/
Changeset: bfd3c32384ff
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-18 10:55:31
Summary: Fixing usage of _field_parameter_state.
Affected #: 3 files
diff -r b54f6a78248b4d8839222878964544137f376045 -r bfd3c32384ff1ce71b8cc1e699ed105935885e6f yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -274,11 +274,11 @@
for chunk in self.data_source.chunks([], "io"):
self._initialize_chunk(chunk, tree)
# This needs to be parallel_objects-ified
- for chunk in parallel_objects(self.data_source.chunks(
- chunk_fields, "io")):
- mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
- get_memory_usage()/1024.)
- with chunk._field_parameter_state(self.field_parameters):
+ with self.data_source._field_parameter_state(self.field_parameters):
+ for chunk in parallel_objects(self.data_source.chunks(
+ chunk_fields, "io")):
+ mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+ get_memory_usage()/1024.)
self._handle_chunk(chunk, fields, tree)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
diff -r b54f6a78248b4d8839222878964544137f376045 -r bfd3c32384ff1ce71b8cc1e699ed105935885e6f yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -592,6 +592,7 @@
add_field("Contours", validators=[ValidateSpatial(0)],
take_log=False,
display_field=False,
+ projection_conversion="1",
function=_Contours)
def obtain_velocities(data):
diff -r b54f6a78248b4d8839222878964544137f376045 -r bfd3c32384ff1ce71b8cc1e699ed105935885e6f yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -578,5 +578,5 @@
break
for j in range(nf):
if contour_ids[ci,cj,ck] == final_joins[j]:
- contour_ids[ci,cj,ck] = j
+ contour_ids[ci,cj,ck] = j + 1
break
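The `j + 1` offset makes the final contour labels 1-based, keeping 0 out of the label range and leaving -1 free as the "no contour" sentinel; the later extract_connected_sets changeset relies on this when selecting obj['Contours'] == cid + 1. In miniature, assuming the surviving IDs are disjoint from the new 1..N range (the per-cell scan in the Cython version needs no such assumption):

    import numpy as np

    final_joins = np.array([17, 42])     # surviving contour ids
    cells = np.array([-1, 17, 42, 17])
    for j in range(final_joins.size):
        cells[cells == final_joins[j]] = j + 1   # 1-based, as in the fix
    print cells   # -> [-1  1  2  1]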
https://bitbucket.org/yt_analysis/yt/commits/32ff4565eef7/
Changeset: 32ff4565eef7
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 17:53:11
Summary: Initial implementation of a cutting region.
Affected #: 1 file
diff -r bfd3c32384ff1ce71b8cc1e699ed105935885e6f -r 32ff4565eef7234f387aafcb8de0aa061fe93d12 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -634,3 +634,60 @@
self.set_field_parameter('e0', e0)
self.set_field_parameter('e1', e1)
self.set_field_parameter('e2', e2)
+
+class YTCutRegionBase(YTSelectionContainer3D):
+ def __init__(self, conditionals, base_object):
+ self.pf = base_object.pf
+ self.conditionals = ensure_list(conditionals)
+ self.base_object = base_object
+ self._selector = None
+ # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
+ # ires and get_data
+ super(YTSelectionContainer3D, self).__init__(self.pf, {})
+
+ @property
+ def selector(self):
+ raise NotImplementedError
+
+ def chunks(self, fields, chunking_style, **kwargs):
+ # We actually want to chunk the sub-chunk, not ourselves. We have no
+ # chunks to speak of, as we do not do data IO.
+ for chunk in self.hierarchy._chunk(self.base_object, chunking_style,
+ **kwargs):
+ with self.base_object._chunked_read(chunk):
+ self.get_data(fields)
+ yield self
+
+ def get_data(self, fields = None):
+ fields = ensure_list(fields)
+ self.base_object.get_data(fields)
+ ind = self._cond_ind
+ for field in fields:
+ self.field_data[field] = self.base_object[field][ind]
+
+ @property
+ def _cond_ind(self):
+ ind = None
+ obj = self.base_object
+ for cond in self.conditionals:
+ res = eval(cond)
+ if ind is None: ind = res
+ np.logical_and(res, ind, ind)
+ return ind
+
+ @property
+ def icoords(self):
+ return self.base_object.icoords[self._cond_ind,:]
+
+ @property
+ def fcoords(self):
+ return self.base_object.fcoords[self._cond_ind,:]
+
+ @property
+ def ires(self):
+ return self.base_object.ires[self._cond_ind]
+
+ @property
+ def fwidth(self):
+ return self.base_object.fwidth[self._cond_ind,:]
+
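The heart of the new object is _cond_ind: each conditional string is handed to eval with the base object bound to the name `obj`, must produce a boolean array, and the results are AND-ed together. A standalone sketch of that logic, with a plain dict standing in for the data object:

    import numpy as np

    def combined_mask(obj, conditionals):
        # Mirrors _cond_ind: `obj` is in scope for each eval'd string.
        ind = None
        for cond in conditionals:
            res = eval(cond)
            ind = res if ind is None else np.logical_and(res, ind)
        return ind

    data = {"Temperature": np.array([5e5, 2e6, 3e6])}
    print combined_mask(data, ["obj['Temperature'] > 1e6"])
    # -> [False  True  True]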
https://bitbucket.org/yt_analysis/yt/commits/5752e507beb8/
Changeset: 5752e507beb8
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 21:33:23
Summary: Adding docstring to cut_region
Affected #: 2 files
diff -r 32ff4565eef7234f387aafcb8de0aa061fe93d12 -r 5752e507beb8f1895b6019afe64eebac1198810d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -765,9 +765,11 @@
def cut_region(self, field_cuts):
"""
- Return an InLineExtractedRegion, where the grid cells are cut on the
- fly with a set of field_cuts. It is very useful for applying
- conditions to the fields in your data object.
+ Return an InLineExtractedRegion, where the object cells are cut on the
+ fly with a set of field_cuts. It is very useful for applying
+ conditions to the fields in your data object. Note that in previous
+ versions of yt, this accepted 'grid' as a variable, but presently it
+ requires 'obj'.
Examples
--------
@@ -775,19 +777,11 @@
>>> pf = load("RedshiftOutput0005")
>>> ad = pf.h.all_data()
- >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+ >>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
>>> print cr.quantities["TotalQuantity"]("CellMassMsun")
-
"""
- return YTValueCutExtractionBase(self, field_cuts)
-
- def extract_region(self, indices):
- """
- Return an ExtractedRegion where the points contained in it are defined
- as the points in `this` data object with the given *indices*.
- """
- fp = self.field_parameters.copy()
- return YTSelectedIndicesBase(self, indices, field_parameters = fp)
+ cr = self.pf.h.cut_region(self, field_cuts)
+ return cr
def extract_isocontours(self, field, value, filename = None,
rescale = False, sample_values = None):
diff -r 32ff4565eef7234f387aafcb8de0aa061fe93d12 -r 5752e507beb8f1895b6019afe64eebac1198810d yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -636,7 +636,31 @@
self.set_field_parameter('e2', e2)
class YTCutRegionBase(YTSelectionContainer3D):
- def __init__(self, conditionals, base_object):
+ """
+ This is a data object designed to allow individuals to apply logical
+ operations to fields or particles and filter as a result of those cuts.
+
+ Parameters
+ ----------
+ conditionals : list of strings
+ A list of conditionals that will be evaluated. In the namespace
+ available, these conditionals will have access to 'obj' which is a data
+ object of unknown shape, and they must generate a boolean array. For
+ instance, conditionals = ["obj['temperature'] < 1e3"]
+ base_object : YTSelectionContainer3D
+ The object to which cuts will be applied.
+
+ Examples
+ --------
+
+ >>> pf = load("DD0010/moving7_0010")
+ >>> sp = pf.h.sphere("max", (1.0, 'mpc'))
+ >>> cr = pf.h.cut_region( ["obj['temperature'] < 1e3"], sp)
+ """
+ _type_name = "cut_region"
+ _con_args = ('conditionals', 'base_object')
+ def __init__(self, conditionals, base_object, pf = None,
+ field_parameters = None):
self.pf = base_object.pf
self.conditionals = ensure_list(conditionals)
self.base_object = base_object
https://bitbucket.org/yt_analysis/yt/commits/bc562b02cc6e/
Changeset: bc562b02cc6e
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 21:33:40
Summary: Re-enable tests for cut_region, remove extract_region as it is gone.
Affected #: 2 files
diff -r 5752e507beb8f1895b6019afe64eebac1198810d -r bc562b02cc6e4f8c3b7c4c4095bfe40c0165bed7 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -642,32 +642,31 @@
Parameters
----------
+ base_object : YTSelectionContainer3D
+ The object to which cuts will be applied.
conditionals : list of strings
A list of conditionals that will be evaluated. In the namespace
available, these conditionals will have access to 'obj' which is a data
object of unknown shape, and they must generate a boolean array. For
instance, conditionals = ["obj['temperature'] < 1e3"]
- base_object : YTSelectionContainer3D
- The object to which cuts will be applied.
Examples
--------
>>> pf = load("DD0010/moving7_0010")
>>> sp = pf.h.sphere("max", (1.0, 'mpc'))
- >>> cr = pf.h.cut_region( ["obj['temperature'] < 1e3"], sp)
+ >>> cr = pf.h.cut_region(sp, ["obj['temperature'] < 1e3"])
"""
_type_name = "cut_region"
- _con_args = ('conditionals', 'base_object')
- def __init__(self, conditionals, base_object, pf = None,
+ _con_args = ("base_object", "conditionals")
+ def __init__(self, base_object, conditionals, pf = None,
field_parameters = None):
- self.pf = base_object.pf
+ super(YTCutRegionBase, self).__init__(base_object.center, pf, field_parameters)
self.conditionals = ensure_list(conditionals)
self.base_object = base_object
self._selector = None
# Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
# ires and get_data
- super(YTSelectionContainer3D, self).__init__(self.pf, {})
@property
def selector(self):
diff -r 5752e507beb8f1895b6019afe64eebac1198810d -r bc562b02cc6e4f8c3b7c4c4095bfe40c0165bed7 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -6,15 +6,14 @@
def test_cut_region():
# We decompose in different ways
- return #TESTDISABLED
for nprocs in [1, 2, 4, 8]:
pf = fake_random_pf(64, nprocs = nprocs,
fields = ("Density", "Temperature", "x-velocity"))
# We'll test two objects
dd = pf.h.all_data()
- r = dd.cut_region( [ "grid['Temperature'] > 0.5",
- "grid['Density'] < 0.75",
- "grid['x-velocity'] > 0.25" ])
+ r = dd.cut_region( [ "obj['Temperature'] > 0.5",
+ "obj['Density'] < 0.75",
+ "obj['x-velocity'] > 0.25" ])
t = ( (dd["Temperature"] > 0.5 )
& (dd["Density"] < 0.75 )
& (dd["x-velocity"] > 0.25 ) )
@@ -23,33 +22,7 @@
yield assert_equal, np.all(r["x-velocity"] > 0.25), True
yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+ r2 = r.cut_region( [ "obj['Temperature'] < 0.75" ] )
t2 = (r["Temperature"] < 0.75)
yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
yield assert_equal, np.all(r2["Temperature"] < 0.75), True
-
-def test_extract_region():
- # We decompose in different ways
- return #TESTDISABLED
- for nprocs in [1, 2, 4, 8]:
- pf = fake_random_pf(64, nprocs = nprocs,
- fields = ("Density", "Temperature", "x-velocity"))
- # We'll test two objects
- dd = pf.h.all_data()
- t = ( (dd["Temperature"] > 0.5 )
- & (dd["Density"] < 0.75 )
- & (dd["x-velocity"] > 0.25 ) )
- r = dd.extract_region(t)
- yield assert_equal, np.all(r["Temperature"] > 0.5), True
- yield assert_equal, np.all(r["Density"] < 0.75), True
- yield assert_equal, np.all(r["x-velocity"] > 0.25), True
- yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
- yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- t2 = (r["Temperature"] < 0.75)
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
- yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
- yield assert_equal, np.all(r2["Temperature"] < 0.75), True
- t3 = (r["Temperature"] < 0.75)
- r3 = r.extract_region( t3 )
- yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
- yield assert_equal, np.all(r3["Temperature"] < 0.75), True
https://bitbucket.org/yt_analysis/yt/commits/0d41cc2f9bcd/
Changeset: 0d41cc2f9bcd
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 22:13:29
Summary: Adding a few tests that have caused me issues in the past.
Affected #: 1 file
diff -r bc562b02cc6e4f8c3b7c4c4095bfe40c0165bed7 -r 0d41cc2f9bcddd356eb073a6db8eb0bbce764b81 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -26,3 +26,17 @@
t2 = (r["Temperature"] < 0.75)
yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+ # Now we can test some projections
+ dd = pf.h.all_data()
+ cr = dd.cut_region(["obj['Ones'] > 0"])
+ for weight in [None, "Density"]:
+ p1 = pf.h.proj("Density", 0, data_source=dd, weight_field=weight)
+ p2 = pf.h.proj("Density", 0, data_source=cr, weight_field=weight)
+ for f in p1.field_data:
+ yield assert_almost_equal, p1[f], p2[f]
+ cr = dd.cut_region(["obj['Density'] > 0.25"])
+ p2 = pf.h.proj("Density", 2, data_source=cr)
+ yield assert_equal, p2["Density"].max() > 0.25, True
+ p2 = pf.h.proj("Density", 2, data_source=cr, weight_field = "Density")
+ yield assert_equal, p2["Density"].max() > 0.25, True
https://bitbucket.org/yt_analysis/yt/commits/030e47318755/
Changeset: 030e47318755
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 22:13:39
Summary: Update argument order in proj docstring.
Affected #: 1 file
diff -r 0d41cc2f9bcddd356eb073a6db8eb0bbce764b81 -r 030e47318755dd66482497f779a463ac91d8d216 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -167,12 +167,12 @@
Parameters
----------
- axis : int
- The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
field : string
This is the field which will be "projected" along the axis. If
multiple are specified (in a list) they will all be projected in
the first pass.
+ axis : int
+ The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
weight_field : string
If supplied, the field being projected will be multiplied by this
weight value before being integrated, and at the conclusion of the
https://bitbucket.org/yt_analysis/yt/commits/def155f22c2c/
Changeset: def155f22c2c
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 22:22:41
Summary: Ensure no NaN values are propagated into projection results.
Affected #: 1 file
diff -r 030e47318755dd66482497f779a463ac91d8d216 -r def155f22c2c32ce7eb3e541cbfc8f32f64bf5b2 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -309,6 +309,7 @@
nvals *= convs[None,:]
# We now convert to half-widths and center-points
data = {}
+ non_nan = (nwvals != 0)
data['px'] = px
data['py'] = py
data['weight_field'] = nwvals
@@ -320,8 +321,9 @@
field_data = np.hsplit(data.pop('fields'), len(fields))
for fi, field in enumerate(fields):
mylog.debug("Setting field %s", field)
- self[field] = field_data[fi].ravel()
- for i in data.keys(): self[i] = data.pop(i)
+ self[field] = field_data[fi].ravel()[non_nan]
+ for i in data.keys():
+ self[i] = data.pop(i)[non_nan]
mylog.info("Projection completed")
def _initialize_chunk(self, chunk, tree):
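The new non_nan mask drops output bins whose accumulated weight is zero; those are exactly the bins where the earlier weighted division produced NaN, so filtering on the weights keeps NaNs out of the stored fields. In miniature:

    import numpy as np

    nvals = np.array([4.0, np.nan, 9.0])  # weighted values; NaN at zero weight
    nwvals = np.array([2.0, 0.0, 3.0])    # accumulated weights
    non_nan = (nwvals != 0)
    print nvals[non_nan]                  # -> [ 4.  9.]; NaN bins dropped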
https://bitbucket.org/yt_analysis/yt/commits/9c0a9c598784/
Changeset: 9c0a9c598784
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 23:26:19
Summary: Re-enabling extract_connected_sets.
Note the comment about the number of joins.
Affected #: 3 files
diff -r def155f22c2c32ce7eb3e541cbfc8f32f64bf5b2 -r 9c0a9c598784e78ce18de4e34260238d169db7c3 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -64,4 +64,8 @@
pbar.finish()
rv = dict()
rv.update(contour_ids)
- return rv
+ # NOTE: Because joins can appear in both a "final join" and a subsequent
+ # "join", we can't know for sure how many unique joins there are without
+ # checking if no cells match or doing an expensive operation checking for
+ # the unique set of final join values.
+ return final_joins.size, rv
diff -r def155f22c2c32ce7eb3e541cbfc8f32f64bf5b2 -r 9c0a9c598784e78ce18de4e34260238d169db7c3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -763,7 +763,7 @@
self._grids = None
self.quantities = DerivedQuantityCollection(self)
- def cut_region(self, field_cuts):
+ def cut_region(self, field_cuts, field_parameters = None):
"""
Return an InLineExtractedRegion, where the object cells are cut on the
fly with a set of field_cuts. It is very useful for applying
@@ -780,7 +780,8 @@
>>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
>>> print cr.quantities["TotalQuantity"]("CellMassMsun")
"""
- cr = self.pf.h.cut_region(self, field_cuts)
+ cr = self.pf.h.cut_region(self, field_cuts,
+ field_parameters = field_parameters)
return cr
def extract_isocontours(self, field, value, filename = None,
@@ -972,12 +973,15 @@
ff, mask, grid.LeftEdge, grid.dds)
def extract_connected_sets(self, field, num_levels, min_val, max_val,
- log_space=True, cumulative=True, cache=False):
+ log_space=True, cumulative=True):
"""
This function will create a set of contour objects, defined
by having connected cell structures, which can then be
studied and used to 'paint' their source grids, thus enabling
them to be plotted.
+
+ Note that this function *can* return a connected set object that has no
+ member values.
"""
if log_space:
cons = np.logspace(np.log10(min_val),np.log10(max_val),
@@ -985,8 +989,6 @@
else:
cons = np.linspace(min_val, max_val, num_levels+1)
contours = {}
- if cache: cached_fields = defaultdict(lambda: dict())
- else: cached_fields = None
for level in range(num_levels):
contours[level] = {}
if cumulative:
@@ -994,10 +996,11 @@
else:
mv = cons[level+1]
from yt.analysis_modules.level_sets.api import identify_contours
- cids = identify_contours(self, field, cons[level], mv,
- cached_fields)
- for cid, cid_ind in cids.items():
- contours[level][cid] = self.extract_region(cid_ind)
+ nj, cids = identify_contours(self, field, cons[level], mv)
+ for cid in range(nj):
+ contours[level][cid] = self.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
return cons, contours
def paint_grids(self, field, value, default_value=None):
diff -r def155f22c2c32ce7eb3e541cbfc8f32f64bf5b2 -r 9c0a9c598784e78ce18de4e34260238d169db7c3 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -675,7 +675,8 @@
def chunks(self, fields, chunking_style, **kwargs):
# We actually want to chunk the sub-chunk, not ourselves. We have no
# chunks to speak of, as we do not do data IO.
- for chunk in self.hierarchy._chunk(self.base_object, chunking_style,
+ for chunk in self.hierarchy._chunk(self.base_object,
+ chunking_style,
**kwargs):
with self.base_object._chunked_read(chunk):
self.get_data(fields)
@@ -692,10 +693,11 @@
def _cond_ind(self):
ind = None
obj = self.base_object
- for cond in self.conditionals:
- res = eval(cond)
- if ind is None: ind = res
- np.logical_and(res, ind, ind)
+ with obj._field_parameter_state(self.field_parameters):
+ for cond in self.conditionals:
+ res = eval(cond)
+ if ind is None: ind = res
+ np.logical_and(res, ind, ind)
return ind
@property
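With identify_contours now returning (n_joins, slices) and the Contours field reading those slices through the contour_slices field parameter, each connected set comes back as a cut region selecting obj['Contours'] == cid + 1. A hypothetical usage sketch in the style of the docstrings above (dataset name, field, and thresholds illustrative):

    from yt.mods import *

    pf = load("DD0010/moving7_0010")
    sp = pf.h.sphere("max", (1.0, 'mpc'))
    cons, contours = sp.extract_connected_sets("Density", 3, 1e-27, 1e-24)
    for level in sorted(contours):
        for cid, clump in sorted(contours[level].items()):
            # A set *can* be empty; see the note added to the docstring.
            print level, cid, clump["CellMassMsun"].sum()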
https://bitbucket.org/yt_analysis/yt/commits/6c2240ea58e9/
Changeset: 6c2240ea58e9
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 23:28:26
Summary: Merging from upstream.
Affected #: 97 files
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
+include distribute_setup.py README* CREDITS COPYING.txt CITATION
recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
+recursive-include yt *.pyx *.pxd *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there! You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses. It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there! You've just downloaded yt, an analysis tool for scientific
+datasets, generated on a variety of data platforms. It's written in
+python and heavily leverages both NumPy and Matplotlib for fast arrays and
+visualization, respectively.
Full documentation and a user community can be found at:
http://yt-project.org/
+
http://yt-project.org/doc/
If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
doc/install_script.sh . You will have to set the destination directory, and
there are options available, but it should be straightforward.
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or
+ways to help development, please visit our website.
Enjoy!
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410 xray_emissivity.h5' > xray_emissivity.h5.sha512
get_ytdata xray_emissivity.h5
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
mkdir -p ${DEST_DIR}/src
cd ${DEST_DIR}/src
@@ -918,6 +923,8 @@
do_setup_py $SYMPY
[ $INST_PYX -eq 1 ] && do_setup_py $PYX
+( ${DEST_DIR}/bin/pip install jinja2 2>&1 ) 1>> ${LOG_FILE}
+
# Now we build Rockstar and set its environment variable.
if [ $INST_ROCKSTAR -eq 1 ]
then
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -86,6 +86,10 @@
#Empty fit without any lines
yFit = na.ones(len(fluxData))
+ #Force the first and last flux pixel to be 1 to prevent OOB
+ fluxData[0]=1
+ fluxData[-1]=1
+
#Find all regions where lines/groups of lines are present
cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
complexLim=complexLim, minLength=minLength,
@@ -120,9 +124,10 @@
z,fitLim,minError*(b[2]-b[1]),speciesDict)
#Check existence of partner lines if applicable
- newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
- b, minError*(b[2]-b[1]),
- x0, xRes, speciesDict)
+ if len(speciesDict['wavelength']) != 1:
+ newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
+ b, minError*(b[2]-b[1]),
+ x0, xRes, speciesDict)
#If flagged as a bad fit, species is lyman alpha,
# and it may be a saturated line, use special tools
@@ -548,6 +553,10 @@
#Index of the redshifted wavelength
indexRedWl = (redWl-x0)/xRes
+ #Check to see if even in flux range
+ if indexRedWl > len(y):
+ return False
+
#Check if surpasses minimum absorption bound
if y[int(indexRedWl)]>fluxMin:
return False
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,16 @@
from .radmc3d_export.api import \
RadMC3DWriter
+from .particle_trajectories.api import \
+ ParticleTrajectories
+
+from .photon_simulator.api import \
+ PhotonList, \
+ EventList, \
+ SpectralModel, \
+ XSpecThermalModel, \
+ XSpecAbsorbModel, \
+ TableApecModel, \
+ TableAbsorbModel, \
+ PhotonModel, \
+ ThermalPhotonModel
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
self._calculate_deltaz_min(deltaz_min=deltaz_min)
cosmology_splice = []
-
+
+ if near_redshift == far_redshift:
+ self.simulation.get_time_series(redshifts=[near_redshift])
+ cosmology_splice.append({'time': self.simulation[0].current_time,
+ 'redshift': self.simulation[0].current_redshift,
+ 'filename': os.path.join(self.simulation[0].fullpath,
+ self.simulation[0].basename),
+ 'next': None})
+ mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+ (cosmology_splice[0]['filename'], near_redshift))
+ return cosmology_splice
+
# Use minimum number of datasets to go from z_i to z_f.
if minimal:
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
only_on_root, \
parallel_objects, \
parallel_root_only
+from yt.utilities.physical_constants import \
+ speed_of_light_cgs, \
+ cm_per_km
class LightRay(CosmologySplice):
"""
@@ -51,7 +54,9 @@
near_redshift : float
The near (lowest) redshift for the light ray.
far_redshift : float
- The far (highest) redshift for the light ray.
+ The far (highest) redshift for the light ray. NOTE: in order
+ to use only a single dataset in a light ray, set the
+ near_redshift and far_redshift to be the same.
use_minimum_datasets : bool
If True, the minimum number of datasets is used to connect the
initial and final redshift. If false, the light ray solution
@@ -111,65 +116,92 @@
time_data=time_data,
redshift_data=redshift_data)
- def _calculate_light_ray_solution(self, seed=None, filename=None):
+ def _calculate_light_ray_solution(self, seed=None,
+ start_position=None, end_position=None,
+ trajectory=None, filename=None):
"Create list of datasets to be added together to make the light ray."
# Calculate dataset sizes, and get random dataset axes and centers.
np.random.seed(seed)
- # For box coherence, keep track of effective depth travelled.
- box_fraction_used = 0.0
+ # If using only one dataset, set start and stop manually.
+ if start_position is not None:
+ if len(self.light_ray_solution) > 1:
+ raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+ if not ((end_position is None) ^ (trajectory is None)):
+ raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+ self.light_ray_solution[0]['start'] = np.array(start_position)
+ if end_position is not None:
+ self.light_ray_solution[0]['end'] = np.array(end_position)
+ else:
+ # assume trajectory given as r, theta, phi
+ if len(trajectory) != 3:
+ raise RuntimeError("LightRay Error: trajectory must have lenght 3.")
+ r, theta, phi = trajectory
+ self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+ r * np.array([np.cos(phi) * np.sin(theta),
+ np.sin(phi) * np.sin(theta),
+ np.cos(theta)])
+ self.light_ray_solution[0]['traversal_box_fraction'] = \
+ vector_length(self.light_ray_solution[0]['start'],
+ self.light_ray_solution[0]['end'])
- for q in range(len(self.light_ray_solution)):
- if (q == len(self.light_ray_solution) - 1):
- z_next = self.near_redshift
- else:
- z_next = self.light_ray_solution[q+1]['redshift']
+ # the normal way (random start positions and trajectories for each dataset)
+ else:
+
+ # For box coherence, keep track of effective depth travelled.
+ box_fraction_used = 0.0
- # Calculate fraction of box required for a depth of delta z
- self.light_ray_solution[q]['traversal_box_fraction'] = \
- self.cosmology.ComovingRadialDistance(\
- z_next, self.light_ray_solution[q]['redshift']) * \
- self.simulation.hubble_constant / \
- self.simulation.box_size
+ for q in range(len(self.light_ray_solution)):
+ if (q == len(self.light_ray_solution) - 1):
+ z_next = self.near_redshift
+ else:
+ z_next = self.light_ray_solution[q+1]['redshift']
- # Simple error check to make sure more than 100% of box depth
- # is never required.
- if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
- mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
- (self.light_ray_solution[q]['redshift'], z_next,
- self.light_ray_solution[q]['traversal_box_fraction']))
- mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
- (self.light_ray_solution[q]['deltazMax'],
- self.light_ray_solution[q]['redshift']-z_next))
+ # Calculate fraction of box required for a depth of delta z
+ self.light_ray_solution[q]['traversal_box_fraction'] = \
+ self.cosmology.ComovingRadialDistance(\
+ z_next, self.light_ray_solution[q]['redshift']) * \
+ self.simulation.hubble_constant / \
+ self.simulation.box_size
- # Get dataset axis and center.
- # If using box coherence, only get start point and vector if
- # enough of the box has been used,
- # or if box_fraction_used will be greater than 1 after this slice.
- if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
- (box_fraction_used >
- self.minimum_coherent_box_fraction) or \
- (box_fraction_used +
- self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
- # Random start point
- self.light_ray_solution[q]['start'] = np.random.random(3)
- theta = np.pi * np.random.random()
- phi = 2 * np.pi * np.random.random()
- box_fraction_used = 0.0
- else:
- # Use end point of previous segment and same theta and phi.
- self.light_ray_solution[q]['start'] = \
- self.light_ray_solution[q-1]['end'][:]
+ # Simple error check to make sure more than 100% of box depth
+ # is never required.
+ if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+ mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+ (self.light_ray_solution[q]['redshift'], z_next,
+ self.light_ray_solution[q]['traversal_box_fraction']))
+ mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+ (self.light_ray_solution[q]['deltazMax'],
+ self.light_ray_solution[q]['redshift']-z_next))
- self.light_ray_solution[q]['end'] = \
- self.light_ray_solution[q]['start'] + \
- self.light_ray_solution[q]['traversal_box_fraction'] * \
- np.array([np.cos(phi) * np.sin(theta),
- np.sin(phi) * np.sin(theta),
- np.cos(theta)])
- box_fraction_used += \
- self.light_ray_solution[q]['traversal_box_fraction']
+ # Get dataset axis and center.
+ # If using box coherence, only get start point and vector if
+ # enough of the box has been used,
+ # or if box_fraction_used will be greater than 1 after this slice.
+ if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+ (box_fraction_used >
+ self.minimum_coherent_box_fraction) or \
+ (box_fraction_used +
+ self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+ # Random start point
+ self.light_ray_solution[q]['start'] = np.random.random(3)
+ theta = np.pi * np.random.random()
+ phi = 2 * np.pi * np.random.random()
+ box_fraction_used = 0.0
+ else:
+ # Use end point of previous segment and same theta and phi.
+ self.light_ray_solution[q]['start'] = \
+ self.light_ray_solution[q-1]['end'][:]
+
+ self.light_ray_solution[q]['end'] = \
+ self.light_ray_solution[q]['start'] + \
+ self.light_ray_solution[q]['traversal_box_fraction'] * \
+ np.array([np.cos(phi) * np.sin(theta),
+ np.sin(phi) * np.sin(theta),
+ np.cos(theta)])
+ box_fraction_used += \
+ self.light_ray_solution[q]['traversal_box_fraction']
if filename is not None:
self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
'far_redshift':self.far_redshift,
'near_redshift':self.near_redshift})
- def make_light_ray(self, seed=None, fields=None,
+ def make_light_ray(self, seed=None,
+ start_position=None, end_position=None,
+ trajectory=None,
+ fields=None,
solution_filename=None, data_filename=None,
get_los_velocity=False,
get_nearest_halo=False,
@@ -197,6 +232,19 @@
seed : int
Seed for the random number generator.
Default: None.
+ start_position : list of floats
+ Used only if creating a light ray from a single dataset.
+ The coordinates of the starting position of the ray.
+ Default: None.
+ end_position : list of floats
+ Used only if creating a light ray from a single dataset.
+ The coordinates of the ending position of the ray.
+ Default: None.
+ trajectory : list of floats
+ Used only if creating a light ray from a single dataset.
+ The (r, theta, phi) direction of the light ray. Use either
+ end_position or trajectory, not both.
+ Default: None.
fields : list
A list of fields for which to get data.
Default: None.
@@ -313,7 +361,11 @@
nearest_halo_fields = []
# Calculate solution.
- self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+ self._calculate_light_ray_solution(seed=seed,
+ start_position=start_position,
+ end_position=end_position,
+ trajectory=trajectory,
+ filename=solution_filename)
# Initialize data structures.
self._data = {}
@@ -335,9 +387,18 @@
for my_storage, my_segment in parallel_objects(self.light_ray_solution,
storage=all_ray_storage,
njobs=njobs, dynamic=dynamic):
- mylog.info("Creating ray segment at z = %f." %
- my_segment['redshift'])
- if my_segment['next'] is None:
+
+ # Load dataset for segment.
+ pf = load(my_segment['filename'])
+
+ if self.near_redshift == self.far_redshift:
+ h_vel = cm_per_km * pf.units['mpc'] * \
+ vector_length(my_segment['start'], my_segment['end']) * \
+ self.cosmology.HubbleConstantNow * \
+ self.cosmology.ExpansionFactor(my_segment['redshift'])
+ next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+ (1. - h_vel / speed_of_light_cgs)) - 1.
+ elif my_segment['next'] is None:
next_redshift = self.near_redshift
else:
next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
(my_segment['redshift'], my_segment['start'],
my_segment['end']))
- # Load dataset for segment.
- pf = load(my_segment['filename'])
-
# Break periodic ray into non-periodic segments.
sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
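The single-dataset branch above converts the Hubble-flow velocity across the
ray segment into an effective redshift via the relativistic Doppler formula,
1 + z = sqrt((1 + v/c) / (1 - v/c)). A minimal sketch of that calculation,
with illustrative numbers standing in for the dataset- and cosmology-derived
quantities:

    import numpy as np

    # Illustrative values, not taken from this changeset.
    speed_of_light_cgs = 2.99792458e10  # cm/s
    cm_per_km = 1.0e5
    segment_length_mpc = 25.0           # proper segment length in Mpc
    hubble_constant_now = 71.0          # km/s/Mpc
    expansion_factor = 1.0              # a(z) at the segment redshift

    # Hubble-flow velocity across the segment, in cm/s.
    h_vel = cm_per_km * segment_length_mpc * \
        hubble_constant_now * expansion_factor

    # Relativistic Doppler shift across the segment.
    beta = h_vel / speed_of_light_cgs
    next_redshift = np.sqrt((1. + beta) / (1. - beta)) - 1.
    print next_redshift  # ~0.0059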
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectories
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,329 @@
+"""
+Particle trajectories
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectories(object):
+ r"""A collection of particle trajectories in time over a series of
+ parameter files.
+
+ The ParticleTrajectories object contains a collection of
+ particle trajectories for a specified set of particle indices.
+
+ Parameters
+ ----------
+ filenames : list of strings
+ A time-sorted list of filenames to construct the TimeSeriesData
+ object.
+ indices : array_like
+ An integer array of particle indices whose trajectories we
+ want to track. If they are not sorted they will be sorted.
+ fields : list of strings, optional
+ A set of fields that is retrieved when the trajectory
+ collection is instantiated.
+ Default : None (will default to the fields 'particle_position_x',
+ 'particle_position_y', 'particle_position_z')
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+ >>> my_fns.sort()
+ >>> fields = ["particle_position_x", "particle_position_y",
+ >>> "particle_position_z", "particle_velocity_x",
+ >>> "particle_velocity_y", "particle_velocity_z"]
+ >>> pf = load(my_fns[0])
+ >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+ >>> indices = init_sphere["particle_index"].astype("int")
+ >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
+ >>> for t in trajs :
+ >>> print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+ Notes
+ -----
+ As of this time, only particle trajectories that are complete over the
+ set of specified parameter files are supported. If any particle's history
+ ends for some reason (e.g. leaving the simulation domain or being actively
+ destroyed), the whole trajectory collection of which it is a member must
+ end at or before the particle's last timestep. This is a limitation we
+ hope to lift at some point in the future.
+ """
+ def __init__(self, filenames, indices, fields=None):
+
+ indices.sort() # Just in case the caller wasn't careful
+
+ self.field_data = YTFieldData()
+ self.pfs = TimeSeriesData.from_filenames(filenames)
+ self.masks = []
+ self.sorts = []
+ self.indices = indices
+ self.num_indices = len(indices)
+ self.num_steps = len(filenames)
+ self.times = []
+
+ # Default fields
+
+ if fields is None: fields = []
+
+ # Must ALWAYS have these fields
+
+ fields = fields + ["particle_position_x",
+ "particle_position_y",
+ "particle_position_z"]
+
+ # Set up the derived field list and the particle field list
+ # so that if the requested field is a particle field, we'll
+ # just copy the field over, but if the field is a grid field,
+ # we will first interpolate the field to the particle positions
+ # and then return the field.
+
+ pf = self.pfs[0]
+ self.derived_field_list = pf.h.derived_field_list
+ self.particle_fields = [field for field in self.derived_field_list
+ if pf.field_info[field].particle_type]
+
+ """
+ The following loops through the parameter files
+ and performs two tasks. The first is to isolate
+ the particles with the correct indices, and the
+ second is to create a sorted list of these particles.
+ We also make a list of the current time from each file.
+ Right now, the code assumes (and checks for) the
+ particle indices existing in each dataset, a limitation I
+ would like to lift at some point since some codes
+ (e.g., FLASH) destroy particles leaving the domain.
+ """
+
+ for pf in self.pfs:
+ dd = pf.h.all_data()
+ newtags = dd["particle_index"].astype("int")
+ if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+ print "Not all requested particle ids contained in this dataset!"
+ raise IndexError
+ mask = np.in1d(newtags, indices, assume_unique=True)
+ sorts = np.argsort(newtags[mask])
+ self.masks.append(mask)
+ self.sorts.append(sorts)
+ self.times.append(pf.current_time)
+
+ self.times = np.array(self.times)
+
+ # Now instantiate the requested fields
+ for field in fields:
+ self._get_data(field)
+
+ def has_key(self, key):
+ return (key in self.field_data)
+
+ def keys(self):
+ return self.field_data.keys()
+
+ def __getitem__(self, key):
+ """
+ Get the field associated with key,
+ checking to make sure it is a particle field.
+ """
+ if key == "particle_time":
+ return self.times
+ if not self.field_data.has_key(key):
+ self._get_data(key)
+ return self.field_data[key]
+
+ def __setitem__(self, key, val):
+ """
+ Sets a field to be some other value.
+ """
+ self.field_data[key] = val
+
+ def __delitem__(self, key):
+ """
+ Delete the field from the trajectory
+ """
+ del self.field_data[key]
+
+ def __iter__(self):
+ """
+ This iterates over the trajectories for
+ the different particles, returning dicts
+ of fields for each trajectory
+ """
+ for idx in xrange(self.num_indices):
+ traj = {}
+ traj["particle_index"] = self.indices[idx]
+ traj["particle_time"] = self.times
+ for field in self.field_data.keys():
+ traj[field] = self[field][idx,:]
+ yield traj
+
+ def __len__(self):
+ """
+ The number of individual trajectories
+ """
+ return self.num_indices
+
+ def add_fields(self, fields):
+ """
+ Add a list of fields to an existing trajectory
+
+ Parameters
+ ----------
+ fields : list of strings
+ A list of fields to be added to the current trajectory
+ collection.
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> trajs = ParticleTrajectories(my_fns, indices)
+ >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+ """
+ for field in fields:
+ if not self.field_data.has_key(field):
+ self._get_data(field)
+
+ def _get_data(self, field):
+ """
+ Get a field to include in the trajectory collection.
+ The trajectory collection itself is a dict of 2D numpy arrays,
+ with shape (num_indices, num_steps)
+ """
+ if not self.field_data.has_key(field):
+ particles = np.empty((0))
+ step = int(0)
+ for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+ if field in self.particle_fields:
+ # This is easy... just get the particle fields
+ dd = pf.h.all_data()
+ pfield = dd[field][mask]
+ particles = np.append(particles, pfield[sort])
+ else:
+ # This is hard... must loop over grids
+ pfield = np.zeros((self.num_indices))
+ x = self["particle_position_x"][:,step]
+ y = self["particle_position_y"][:,step]
+ z = self["particle_position_z"][:,step]
+ particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+ for grid in particle_grids:
+ cube = grid.retrieve_ghost_zones(1, [field])
+ CICSample_3(x,y,z,pfield,
+ self.num_indices,
+ cube[field],
+ np.array(grid.LeftEdge).astype(np.float64),
+ np.array(grid.ActiveDimensions).astype(np.int32),
+ np.float64(grid['dx']))
+ particles = np.append(particles, pfield)
+ step += 1
+ self[field] = particles.reshape(self.num_steps,
+ self.num_indices).transpose()
+ return self.field_data[field]
+
+ def trajectory_from_index(self, index):
+ """
+ Retrieve a single trajectory corresponding to a specific particle
+ index
+
+ Parameters
+ ----------
+ index : int
+ This defines which particle trajectory from the
+ ParticleTrajectories object will be returned.
+
+ Returns
+ -------
+ A dictionary corresponding to the particle's trajectory and the
+ fields along that trajectory
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> import matplotlib.pylab as pl
+ >>> trajs = ParticleTrajectories(my_fns, indices)
+ >>> traj = trajs.trajectory_from_index(indices[0])
+ >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+ >>> pl.savefig("orbit")
+ """
+ mask = np.in1d(self.indices, (index,), assume_unique=True)
+ if not np.any(mask):
+ print "The particle index %d is not in the list!" % (index)
+ raise IndexError
+ fields = [field for field in sorted(self.field_data.keys())]
+ traj = {}
+ traj["particle_time"] = self.times
+ traj["particle_index"] = index
+ for field in fields:
+ traj[field] = self[field][mask,:][0]
+ return traj
+
+ def write_out(self, filename_base):
+ """
+ Write out particle trajectories to tab-separated ASCII files (one
+ for each trajectory) with the field names in the file header. Each
+ file is named with a basename and the index number.
+
+ Parameters
+ ----------
+ filename_base : string
+ The prefix for the outputted ASCII files.
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> trajs = ParticleTrajectories(my_fns, indices)
+ >>> trajs.write_out("orbit_trajectory")
+ """
+ fields = [field for field in sorted(self.field_data.keys())]
+ num_fields = len(fields)
+ first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+ template_str = "%g\t"*num_fields+"%g\n"
+ for ix in xrange(self.num_indices):
+ outlines = [first_str]
+ for it in xrange(self.num_steps):
+ outlines.append(template_str %
+ tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+ fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+ fid.writelines(outlines)
+ fid.close()
+ del fid
+
+ def write_out_h5(self, filename):
+ """
+ Write out all the particle trajectories to a single HDF5 file
+ that contains the indices, the times, and the 2D array for each
+ field individually
+
+ Parameters
+ ----------
+
+ filename : string
+ The output filename for the HDF5 file
+
+ Examples
+ --------
+
+ >>> from yt.mods import *
+ >>> trajs = ParticleTrajectories(my_fns, indices)
+ >>> trajs.write_out_h5("orbit_trajectories")
+ """
+ fid = h5py.File(filename, "w")
+ fields = [field for field in sorted(self.field_data.keys())]
+ fid.create_dataset("particle_indices", dtype=np.int32,
+ data=self.indices)
+ fid.create_dataset("particle_time", data=self.times)
+ for field in fields:
+ fid.create_dataset("%s" % field, data=self[field])
+ fid.close()
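In _get_data above, one flat array per timestep is accumulated and the final
reshape(num_steps, num_indices).transpose() makes each row a single
particle's history, matching the [idx,:] slicing used by __iter__. A small
sketch of that assembly with stand-in data:

    import numpy as np

    num_indices, num_steps = 3, 2
    particles = np.empty((0,))
    for step in xrange(num_steps):
        # One value per tracked particle at this step (stand-in data).
        pfield = np.arange(num_indices) + 10 * step
        particles = np.append(particles, pfield)

    # Rows are particles, columns are timesteps.
    trajectories = particles.reshape(num_steps, num_indices).transpose()
    print trajectories.shape  # (3, 2)
    print trajectories[0, :]  # history of the first particle: [0. 10.]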
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('particle_trajectories', parent_package, top_path)
+ #config.add_subpackage("tests")
+ config.make_config_py() # installs __config__.py
+ #config.make_svn_version_py()
+ return config
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/photon_simulator/api.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.analysis_modules.photon_simulator.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .photon_models import \
+ PhotonModel, \
+ ThermalPhotonModel
+
+from .photon_simulator import \
+ PhotonList, \
+ EventList
+
+from .spectral_models import \
+ SpectralModel, \
+ XSpecThermalModel, \
+ XSpecAbsorbModel, \
+ TableApecModel, \
+ TableAbsorbModel
diff -r 9c0a9c598784e78ce18de4e34260238d169db7c3 -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 yt/analysis_modules/photon_simulator/photon_models.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -0,0 +1,205 @@
+"""
+Classes for specific photon models
+
+The algorithms used here are based on the method used by the
+PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
+developed by Veronica Biffi and Klaus Dolag. References for
+PHOX may be found at:
+
+Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545
+http://adsabs.harvard.edu/abs/2012MNRAS.420.3545B
+
+Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395
+http://adsabs.harvard.edu/abs/2013MNRAS.428.1395B
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import *
+from yt.utilities.physical_constants import \
+ mp, cm_per_km, K_per_keV, cm_per_mpc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ communication_system
+
+N_TBIN = 10000
+TMIN = 8.08e-2
+TMAX = 50.
+
+comm = communication_system.communicators[-1]
+
+class PhotonModel(object):
+
+ def __init__(self):
+ pass
+
+ def __call__(self, data_source, parameters):
+ photons = {}
+ return photons
+
+class ThermalPhotonModel(PhotonModel):
+ r"""
+ Initialize a ThermalPhotonModel from a thermal spectrum.
+
+ Parameters
+ ----------
+
+ spectral_model : `SpectralModel`
+ A thermal spectral model instance, either `XSpecThermalModel`
+ or `TableApecModel`.
+ X_H : float, optional
+ The hydrogen mass fraction.
+ Zmet : float or string, optional
+ The metallicity. If a float, assumes a constant metallicity throughout.
+ If a string, is taken to be the name of the metallicity field.
+ """
+ def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+ self.X_H = X_H
+ self.Zmet = Zmet
+ self.spectral_model = spectral_model
+
+ def __call__(self, data_source, parameters):
+
+ pf = data_source.pf
+
+ exp_time = parameters["FiducialExposureTime"]
+ area = parameters["FiducialArea"]
+ redshift = parameters["FiducialRedshift"]
+ D_A = parameters["FiducialAngularDiameterDistance"]*cm_per_mpc
+ dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3)
+
+ vol_scale = pf.units["cm"]**(-3)/np.prod(pf.domain_width)
+
+ num_cells = data_source["Temperature"].shape[0]
+ start_c = comm.rank*num_cells/comm.size
+ end_c = (comm.rank+1)*num_cells/comm.size
+
+ kT = data_source["Temperature"][start_c:end_c].copy()/K_per_keV
+ vol = data_source["CellVolume"][start_c:end_c].copy()
+ dx = data_source["dx"][start_c:end_c].copy()
+ EM = (data_source["Density"][start_c:end_c].copy()/mp)**2
+ EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+
+ data_source.clear_data()
+
+ x = data_source["x"][start_c:end_c].copy()
+ y = data_source["y"][start_c:end_c].copy()
+ z = data_source["z"][start_c:end_c].copy()
+
+ data_source.clear_data()
+
+ vx = data_source["x-velocity"][start_c:end_c].copy()
+ vy = data_source["y-velocity"][start_c:end_c].copy()
+ vz = data_source["z-velocity"][start_c:end_c].copy()
+
+ if isinstance(self.Zmet, basestring):
+ metalZ = data_source[self.Zmet][start_c:end_c].copy()
+ else:
+ metalZ = self.Zmet*np.ones(EM.shape)
+
+ data_source.clear_data()
+
+ idxs = np.argsort(kT)
+ dshape = idxs.shape
+
+ kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
+ dkT = kT_bins[1]-kT_bins[0]
+ kT_idxs = np.digitize(kT[idxs], kT_bins)
+ kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
+ bcounts = np.bincount(kT_idxs).astype("int")
+ bcounts = bcounts[bcounts > 0]
+ n = int(0)
+ bcell = []
+ ecell = []
+ for bcount in bcounts:
+ bcell.append(n)
+ ecell.append(n+bcount)
+ n += bcount
+ kT_idxs = np.unique(kT_idxs)
+
+ self.spectral_model.prepare()
+ energy = self.spectral_model.ebins
+
+ cell_em = EM[idxs]*vol_scale
+ cell_vol = vol[idxs]*vol_scale
+
+ number_of_photons = np.zeros(dshape, dtype='uint64')
+ energies = []
+
+ u = np.random.random(cell_em.shape)
+
+ pbar = get_pbar("Generating Photons", dshape[0])
+
+ for i, ikT in enumerate(kT_idxs):
+
+ ncells = int(bcounts[i])
+ ibegin = bcell[i]
+ iend = ecell[i]
+ kT = kT_bins[ikT] + 0.5*dkT
+
+ em_sum_c = cell_em[ibegin:iend].sum()
+ em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+
+ cspec, mspec = self.spectral_model.get_spectrum(kT)
+ cspec *= dist_fac*em_sum_c/vol_scale
+ mspec *= dist_fac*em_sum_m/vol_scale
+
+ cumspec_c = np.cumsum(cspec)
+ counts_c = cumspec_c[:]/cumspec_c[-1]
+ counts_c = np.insert(counts_c, 0, 0.0)
+ tot_ph_c = cumspec_c[-1]*area*exp_time
+
+ cumspec_m = np.cumsum(mspec)
+ counts_m = cumspec_m[:]/cumspec_m[-1]
+ counts_m = np.insert(counts_m, 0, 0.0)
+ tot_ph_m = cumspec_m[-1]*area*exp_time
+
+ for icell in xrange(ibegin, iend):
+
+ cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
+ cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+
+ cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
+ cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+
+ cell_n = cell_n_c + cell_n_m
+
+ if cell_n > 0:
+ number_of_photons[icell] = cell_n
+ randvec_c = np.random.uniform(size=cell_n_c)
+ randvec_c.sort()
+ randvec_m = np.random.uniform(size=cell_n_m)
+ randvec_m.sort()
+ cell_e_c = np.interp(randvec_c, counts_c, energy)
+ cell_e_m = np.interp(randvec_m, counts_m, energy)
+ energies.append(np.concatenate([cell_e_c,cell_e_m]))
+
+ pbar.update(icell)
+
+ pbar.finish()
+
+ active_cells = number_of_photons > 0
+ idxs = idxs[active_cells]
+
+ photons = {}
+
+ src_ctr = parameters["center"]
+
+ photons["x"] = (x[idxs]-src_ctr[0])*pf.units["kpc"]
+ photons["y"] = (y[idxs]-src_ctr[1])*pf.units["kpc"]
+ photons["z"] = (z[idxs]-src_ctr[2])*pf.units["kpc"]
+ photons["vx"] = vx[idxs]/cm_per_km
+ photons["vy"] = vy[idxs]/cm_per_km
+ photons["vz"] = vz[idxs]/cm_per_km
+ photons["dx"] = dx[idxs]*pf.units["kpc"]
+ photons["NumberOfPhotons"] = number_of_photons[active_cells]
+ photons["Energy"] = np.concatenate(energies)
+
+ return photons
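The energy draw above is inverse-transform sampling: the cumulative spectrum
is normalized into a CDF (with a leading zero so it lines up with the energy
bin edges) and sorted uniform deviates are mapped through it with np.interp.
A minimal sketch with a made-up spectrum standing in for
spectral_model.get_spectrum:

    import numpy as np

    energy = np.linspace(0.1, 10.0, 101)  # bin edges in keV (stand-in)
    spec = np.exp(-energy[:-1])           # counts per bin (stand-in)

    cumspec = np.cumsum(spec)
    counts = cumspec / cumspec[-1]        # normalized CDF in (0, 1]
    counts = np.insert(counts, 0, 0.0)    # now the same length as energy

    randvec = np.random.uniform(size=5)
    randvec.sort()
    # Invert the CDF by interpolation: uniform deviates -> photon energies.
    cell_e = np.interp(randvec, counts, energy)
    print cell_e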
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/0fe4a50b91a8/
Changeset: 0fe4a50b91a8
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-26 23:55:07
Summary: Ensure we don't preload derived fields.
Affected #: 1 file
diff -r 6c2240ea58e932f2c62939c7c6ae7e3e16d5b892 -r 0fe4a50b91a8a439d1bb65b9976aebf8c426275d yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -273,7 +273,9 @@
giter = sorted(gobjs, key = -g.Level)
elif sort is None:
giter = gobjs
- if self._preload_implemented and preload_fields is not None and ngz == 0:
+ if preload_fields is None: preload_fields = []
+ preload_fields, _ = self._split_fields(preload_fields)
+ if self._preload_implemented and len(preload_fields) > 0 and ngz == 0:
giter = ChunkDataCache(list(giter), preload_fields, self)
for i, og in enumerate(giter):
if ngz > 0:
https://bitbucket.org/yt_analysis/yt/commits/081c9458170a/
Changeset: 081c9458170a
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-27 15:48:50
Summary: Fixing how the nans are checked for, which is slightly more expensive.
Affected #: 1 file
diff -r 0fe4a50b91a8a439d1bb65b9976aebf8c426275d -r 081c9458170ad78a0c6af4b4fbeb72dd816dcc0f yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -309,7 +309,7 @@
nvals *= convs[None,:]
# We now convert to half-widths and center-points
data = {}
- non_nan = (nwvals != 0)
+ non_nan = ~np.any(np.isnan(nvals), axis=-1)
data['px'] = px
data['py'] = py
data['weight_field'] = nwvals
https://bitbucket.org/yt_analysis/yt/commits/8238faf0163b/
Changeset: 8238faf0163b
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-27 15:49:06
Summary: Don't compare NaN values, as we now filter them out.
Affected #: 1 file
diff -r 081c9458170ad78a0c6af4b4fbeb72dd816dcc0f -r 8238faf0163ba0dc4a9e1d88742be3f72be3d167 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -444,19 +444,26 @@
if new_result is None:
return
assert(len(new_result) == len(old_result))
+ nind, oind = None, None
for k in new_result:
assert (k in old_result)
+ if oind is None: oind = np.isnan(old_result[k])
+ np.logical_or(oind, np.isnan(old_result[k]), oind)
+ if nind is None: nind = np.isnan(new_result[k])
+ np.logical_or(nind, np.isnan(new_result[k]), nind)
+ oind = ~oind
+ nind = ~nind
for k in new_result:
err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
(k, self.field, self.weight_field, self.axis)
if k == 'weight_field' and self.weight_field is None:
continue
+ nres, ores = new_result[k][nind], old_result[k][oind]
if self.decimals is None:
- assert_equal(new_result[k], old_result[k],
- err_msg=err_msg)
+ assert_equal(nres, ores, err_msg=err_msg)
else:
- assert_allclose(new_result[k], old_result[k],
- 10.**-(self.decimals), err_msg=err_msg)
+ assert_allclose(nres, ores, 10.**-(self.decimals),
+ err_msg=err_msg)
class PixelizedProjectionValuesTest(AnswerTestingTest):
_type_name = "PixelizedProjectionValues"
https://bitbucket.org/yt_analysis/yt/commits/d39fa2e575b4/
Changeset: d39fa2e575b4
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-27 15:55:54
Summary: Update Clump object to use new identify_contours.
Affected #: 1 file
diff -r 8238faf0163ba0dc4a9e1d88742be3f72be3d167 -r d39fa2e575b40b16cc3559c4650ebacd1bf0b931 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -107,10 +107,11 @@
print "Wiping out existing children clumps."
self.children = []
if max_val is None: max_val = self.max_val
- contour_info = identify_contours(self.data, self.field, min_val, max_val,
- self.cached_fields)
- for cid in contour_info:
- new_clump = self.data.extract_region(contour_info[cid])
+ nj, cids = identify_contours(self.data, self.field, min_val, max_val)
+ for cid in range(nj):
+ new_clump = self.data.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
self.children.append(Clump(new_clump, self, self.field,
self.cached_fields,function=self.function,
clump_info=self.clump_info))
https://bitbucket.org/yt_analysis/yt/commits/30e9ec2d22a9/
Changeset: 30e9ec2d22a9
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-27 16:03:07
Summary: Adding comments to ContourFinding.pyx.
Affected #: 1 file
diff -r d39fa2e575b40b16cc3559c4650ebacd1bf0b931 -r 30e9ec2d22a97a30c693cfafcfb3627f216e562b yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -105,6 +105,22 @@
return node
cdef class ContourTree:
+ # This class is essentially a Union-Find algorithm. What we want to do is
+ # to, given a connection between two objects, identify the unique ID for
+ # those two objects. So what we have is a collection of contours, and they
+ # eventually all get joined and contain lots of individual IDs. But it's
+ # easy to find the *first* contour, i.e., the primary ID, for each of the
+ # subsequent IDs.
+ #
+ # This means that we can connect id 202483 to id 2472, and if id 2472 is
+ # connected to id 143, the connection will *actually* be from 202483 to
+ # 143. In this way we can speed up joining things and knowing their
+ # "canonical" id.
+ #
+ # This is a multi-step process, since we first want to connect all of the
+ # contours, then we end up wanting to coalesce them, and ultimately we join
+ # them at the end. The join produces a table that maps the initial to the
+ # final, and we can go through and just update all of those.
cdef ContourID *first
cdef ContourID *last
@@ -124,6 +140,9 @@
@cython.boundscheck(False)
@cython.wraparound(False)
def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
+ # This adds new contours, from the given contour IDs, to the tree.
+ # Each one can be connected to a parent, as well as to next/prev in the
+ # set of contours belonging to this tree.
cdef int i, n
n = contour_ids.shape[0]
cdef ContourID *cur = self.last
@@ -137,7 +156,9 @@
self.last = contour_create(contour_id, self.last)
def cull_candidates(self, np.ndarray[np.int64_t, ndim=3] candidates):
- # This is a helper function.
+ # This function looks at each preliminary contour ID belonging to a
+ # given collection of values, and then if need be it creates a new
+ # contour for it.
cdef int i, j, k, ni, nj, nk, nc
cdef CandidateContour *first = NULL
cdef CandidateContour *temp
@@ -157,6 +178,8 @@
cdef np.ndarray[np.int64_t, ndim=1] contours
contours = np.empty(nc, dtype="int64")
i = 0
+ # This removes all the temporary contours for this set of contours and
+ # instead constructs a final list of them.
while first != NULL:
contours[i] = first.contour_id
i += 1
@@ -166,6 +189,9 @@
return contours
def cull_joins(self, np.ndarray[np.int64_t, ndim=2] cjoins):
+ # This coalesces contour IDs, so that we have only the final name
+ # resolutions -- the .join_id from a candidate. So many items will map
+ # to a single join_id.
cdef int i, j, k, ni, nj, nk, nc
cdef CandidateContour *first = NULL
cdef CandidateContour *temp
@@ -266,6 +292,8 @@
def identify_contours(self, np.ndarray[np.float64_t, ndim=3] values,
np.ndarray[np.int64_t, ndim=3] contour_ids,
np.int64_t start):
+ # This just looks at neighbor values and tries to identify which zones
+ # are touching by face within a given brick.
cdef int i, j, k, ni, nj, nk, offset
cdef int off_i, off_j, off_k, oi, ok, oj
cdef ContourID *cur = NULL
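The ContourTree described above is a union-find structure: every contour ID
resolves to the first (canonical) ID it is connected to. A minimal
pure-Python sketch of the same idea (the real implementation is a Cython
linked list):

    class UnionFind(object):
        def __init__(self):
            self.parent = {}

        def find(self, cid):
            # Walk to the canonical (root) id, compressing the path.
            root = cid
            while self.parent.get(root, root) != root:
                root = self.parent[root]
            while self.parent.get(cid, cid) != root:
                self.parent[cid], cid = root, self.parent[cid]
            return root

        def union(self, a, b):
            ra, rb = self.find(a), self.find(b)
            # Keep the smaller id canonical, so the *first* contour wins.
            if ra != rb:
                self.parent[max(ra, rb)] = min(ra, rb)

    uf = UnionFind()
    uf.union(202483, 2472)
    uf.union(2472, 143)
    print uf.find(202483)  # 143, matching the example in the comment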
https://bitbucket.org/yt_analysis/yt/commits/9eab26fec77f/
Changeset: 9eab26fec77f
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-27 21:19:13
Summary: Watch out for NaNs in the projection results.
Affected #: 1 file
diff -r 30e9ec2d22a97a30c693cfafcfb3627f216e562b -r 9eab26fec77f373c896e2da5d9aef13e079c283e yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -447,9 +447,11 @@
nind, oind = None, None
for k in new_result:
assert (k in old_result)
- if oind is None: oind = np.isnan(old_result[k])
+ if oind is None:
+ oind = np.array(np.isnan(old_result[k]))
np.logical_or(oind, np.isnan(old_result[k]), oind)
- if nind is None: nind = np.isnan(new_result[k])
+ if nind is None:
+ nind = np.array(np.isnan(new_result[k]))
np.logical_or(nind, np.isnan(new_result[k]), nind)
oind = ~oind
nind = ~nind
https://bitbucket.org/yt_analysis/yt/commits/396b13456c0b/
Changeset: 396b13456c0b
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-27 21:20:19
Summary: Backing out the projection value changes for the time being.
Affected #: 1 file
diff -r 9eab26fec77f373c896e2da5d9aef13e079c283e -r 396b13456c0bdf873dcd060b1f8b508038ca337c yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -309,7 +309,7 @@
nvals *= convs[None,:]
# We now convert to half-widths and center-points
data = {}
- non_nan = ~np.any(np.isnan(nvals), axis=-1)
+ #non_nan = ~np.any(np.isnan(nvals), axis=-1)
data['px'] = px
data['py'] = py
data['weight_field'] = nwvals
@@ -321,9 +321,9 @@
field_data = np.hsplit(data.pop('fields'), len(fields))
for fi, field in enumerate(fields):
mylog.debug("Setting field %s", field)
- self[field] = field_data[fi].ravel()[non_nan]
+ self[field] = field_data[fi].ravel()#[non_nan]
for i in data.keys():
- self[i] = data.pop(i)[non_nan]
+ self[i] = data.pop(i)#[non_nan]
mylog.info("Projection completed")
def _initialize_chunk(self, chunk, tree):
https://bitbucket.org/yt_analysis/yt/commits/38fff02747ca/
Changeset: 38fff02747ca
Branch: yt-3.0
User: MatthewTurk
Date: 2013-12-02 03:27:18
Summary: Merged in MatthewTurk/yt-3.0 (pull request #120)
Contour finder rewrite
Affected #: 18 files
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -14,7 +14,6 @@
#-----------------------------------------------------------------------------
from .contour_finder import \
- coalesce_join_tree, \
identify_contours
from .clump_handling import \
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -107,10 +107,11 @@
print "Wiping out existing children clumps."
self.children = []
if max_val is None: max_val = self.max_val
- contour_info = identify_contours(self.data, self.field, min_val, max_val,
- self.cached_fields)
- for cid in contour_info:
- new_clump = self.data.extract_region(contour_info[cid])
+ nj, cids = identify_contours(self.data, self.field, min_val, max_val)
+ for cid in range(nj):
+ new_clump = self.data.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
self.children.append(Clump(new_clump, self, self.field,
self.cached_fields,function=self.function,
clump_info=self.clump_info))
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -20,120 +20,52 @@
import yt.utilities.data_point_utilities as data_point_utilities
import yt.utilities.lib as amr_utils
-def coalesce_join_tree(jtree1):
- joins = defaultdict(set)
- nj = jtree1.shape[0]
- for i1 in range(nj):
- current_new = jtree1[i1, 0]
- current_old = jtree1[i1, 1]
- for i2 in range(nj):
- if jtree1[i2, 1] == current_new:
- current_new = max(current_new, jtree1[i2, 0])
- jtree1[i1, 0] = current_new
- for i1 in range(nj):
- joins[jtree1[i1, 0]].update([jtree1[i1, 1], jtree1[i1, 0]])
- updated = -1
- while updated != 0:
- keys = list(reversed(sorted(joins.keys())))
- updated = 0
- for k1 in keys + keys[::-1]:
- if k1 not in joins: continue
- s1 = joins[k1]
- for k2 in keys + keys[::-1]:
- if k2 >= k1: continue
- if k2 not in joins: continue
- s2 = joins[k2]
- if k2 in s1:
- s1.update(joins.pop(k2))
- updated += 1
- elif not s1.isdisjoint(s2):
- s1.update(joins.pop(k2))
- s1.update([k2])
- updated += 1
- tr = []
- for k in joins.keys():
- v = joins.pop(k)
- tr.append((k, np.array(list(v), dtype="int64")))
- return tr
-
def identify_contours(data_source, field, min_val, max_val,
cached_fields=None):
- cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
- pbar = get_pbar("First pass", len(data_source._grids))
- grids = sorted(data_source._grids, key=lambda g: -g.Level)
+ tree = amr_utils.ContourTree()
+ gct = amr_utils.TileContourTree(min_val, max_val)
total_contours = 0
- tree = []
- for gi,grid in enumerate(grids):
- pbar.update(gi+1)
- cm = data_source._get_cut_mask(grid)
- if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
- old_field_parameters = grid.field_parameters
- grid.field_parameters = data_source.field_parameters
- local_ind = np.where( (grid[field] > min_val)
- & (grid[field] < max_val) & cm )
- grid.field_parameters = old_field_parameters
- if local_ind[0].size == 0: continue
- kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
- grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
- grid["tempContours"][local_ind] = kk[:]
- cur_max_id -= local_ind[0].size
- xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
- cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
- fd_orig = grid["tempContours"].copy()
- xi = xi_u[cor_order]
- yi = yi_u[cor_order]
- zi = zi_u[cor_order]
- while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
- pass
- total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
- new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
- tree += zip(new_contours, new_contours)
- tree = set(tree)
+ contours = {}
+ empty_mask = np.ones((1,1,1), dtype="uint8")
+ node_ids = []
+ for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+ node.node_ind = len(node_ids)
+ nid = node.node_id
+ node_ids.append(nid)
+ values = g[field][sl].astype("float64")
+ contour_ids = np.zeros(dims, "int64") - 1
+ gct.identify_contours(values, contour_ids, total_contours)
+ new_contours = tree.cull_candidates(contour_ids)
+ total_contours += new_contours.shape[0]
+ tree.add_contours(new_contours)
+ # Now we can create a partitioned grid with the contours.
+ pg = amr_utils.PartitionedGrid(g.id,
+ [contour_ids.view("float64")],
+ empty_mask, g.dds * gi, g.dds * (gi + dims),
+ dims.astype("int64"))
+ contours[nid] = (g.Level, node.node_ind, pg, sl)
+ node_ids = np.array(node_ids)
+ trunk = data_source.tiles.tree.trunk
+ mylog.info("Linking node (%s) contours.", len(contours))
+ amr_utils.link_node_contours(trunk, contours, tree, node_ids)
+ mylog.info("Linked.")
+ #joins = tree.cull_joins(bt)
+ #tree.add_joins(joins)
+ joins = tree.export()
+ contour_ids = defaultdict(list)
+ pbar = get_pbar("Updating joins ... ", len(contours))
+ final_joins = np.unique(joins[:,1])
+ for i, nid in enumerate(sorted(contours)):
+ level, node_ind, pg, sl = contours[nid]
+ ff = pg.my_data[0].view("int64")
+ amr_utils.update_joins(joins, ff, final_joins)
+ contour_ids[pg.parent_grid_id].append((sl, ff))
+ pbar.update(i)
pbar.finish()
- pbar = get_pbar("Calculating joins ", len(data_source._grids))
- grid_set = set()
- for gi,grid in enumerate(grids):
- pbar.update(gi)
- cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
- grid_set.update(set(cg._grids))
- fd = cg["tempContours"].astype('int64')
- boundary_tree = amr_utils.construct_boundary_relationships(fd)
- tree.update(((a, b) for a, b in boundary_tree))
- pbar.finish()
- sort_new = np.array(list(tree), dtype='int64')
- mylog.info("Coalescing %s joins", sort_new.shape[0])
- joins = coalesce_join_tree(sort_new)
- #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
- pbar = get_pbar("Joining ", len(joins))
- # This process could and should be done faster
- print "Joining..."
- t1 = time.time()
- ff = data_source["tempContours"].astype("int64")
- amr_utils.update_joins(joins, ff)
- data_source["tempContours"] = ff.astype("float64")
- #for i, new in enumerate(sorted(joins.keys())):
- # pbar.update(i)
- # old_set = joins[new]
- # for old in old_set:
- # if old == new: continue
- # i1 = (data_source["tempContours"] == old)
- # data_source["tempContours"][i1] = new
- t2 = time.time()
- print "Finished joining in %0.2e seconds" % (t2-t1)
- pbar.finish()
- data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
- del data_source.field_data["tempContours"] # Force a reload from the grids
- data_source.get_data("tempContours")
- contour_ind = {}
- i = 0
- for contour_id in np.unique(data_source["tempContours"]):
- if contour_id == -1: continue
- contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
- mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
- i += 1
- mylog.info("Identified %s contours between %0.5e and %0.5e",
- len(contour_ind.keys()),min_val,max_val)
- for grid in chain(grid_set):
- grid.field_data.pop("tempContours", None)
- del data_source.field_data["tempContours"]
- return contour_ind
+ rv = dict()
+ rv.update(contour_ids)
+ # NOTE: Because joins can appear in both a "final join" and a subsequent
+ # "join", we can't know for sure how many unique joins there are without
+ # checking if no cells match or doing an expensive operation checking for
+ # the unique set of final join values.
+ return final_joins.size, rv
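After linking, tree.export() produces a table of (initial, final) contour
IDs and update_joins relabels each brick's ID array accordingly. A small
NumPy sketch of that relabeling step (the real update_joins is a Cython
routine; the join values here are invented):

    import numpy as np

    # (initial_id, final_id) pairs, as a tree export might produce.
    joins = np.array([[5, 2],
                      [7, 2],
                      [9, 4]], dtype="int64")

    ff = np.array([5, 7, 9, 2, -1], dtype="int64")  # -1 means no contour
    out = ff.copy()
    for initial, final in joins:
        out[ff == initial] = final
    print out  # [ 2  2  4  2 -1]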
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -167,12 +167,12 @@
Parameters
----------
- axis : int
- The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
field : string
This is the field which will be "projected" along the axis. If
multiple are specified (in a list) they will all be projected in
the first pass.
+ axis : int
+ The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
weight_field : string
If supplied, the field being projected will be multiplied by this
weight value before being integrated, and at the conclusion of the
@@ -274,11 +274,12 @@
for chunk in self.data_source.chunks([], "io"):
self._initialize_chunk(chunk, tree)
# This needs to be parallel_objects-ified
- for chunk in parallel_objects(self.data_source.chunks(
- chunk_fields, "io")):
- mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
- get_memory_usage()/1024.)
- self._handle_chunk(chunk, fields, tree)
+ with self.data_source._field_parameter_state(self.field_parameters):
+ for chunk in parallel_objects(self.data_source.chunks(
+ chunk_fields, "io")):
+ mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+ get_memory_usage()/1024.)
+ self._handle_chunk(chunk, fields, tree)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
merge_style = -1
@@ -308,6 +309,7 @@
nvals *= convs[None,:]
# We now convert to half-widths and center-points
data = {}
+ #non_nan = ~np.any(np.isnan(nvals), axis=-1)
data['px'] = px
data['py'] = py
data['weight_field'] = nwvals
@@ -319,8 +321,9 @@
field_data = np.hsplit(data.pop('fields'), len(fields))
for fi, field in enumerate(fields):
mylog.debug("Setting field %s", field)
- self[field] = field_data[fi].ravel()
- for i in data.keys(): self[i] = data.pop(i)
+ self[field] = field_data[fi].ravel()#[non_nan]
+ for i in data.keys():
+ self[i] = data.pop(i)#[non_nan]
mylog.info("Projection completed")
def _initialize_chunk(self, chunk, tree):
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -33,6 +33,8 @@
ParallelAnalysisInterface
from yt.utilities.parameter_file_storage import \
ParameterFileStore
+from yt.utilities.amr_kdtree.api import \
+ AMRKDTree
from .derived_quantities import DerivedQuantityCollection
from .field_info_container import \
NeedsGridType, ValidateSpatial
@@ -365,6 +367,13 @@
return s
@contextmanager
+ def _field_parameter_state(self, field_parameters):
+ old_field_parameters = self.field_parameters
+ self.field_parameters = field_parameters
+ yield
+ self.field_parameters = old_field_parameters
+
+ @contextmanager
def _field_type_state(self, ftype, finfo, obj = None):
if obj is None: obj = self
old_particle_type = obj._current_particle_type
@@ -407,6 +416,14 @@
explicit_fields.append((ftype, fname))
return explicit_fields
+ _tree = None
+
+ @property
+ def tiles(self):
+ if self._tree is not None: return self._tree
+ self._tree = AMRKDTree(self.pf, data_source=self)
+ return self._tree
+
@property
def blocks(self):
for io_chunk in self.chunks([], "io"):
@@ -751,11 +768,13 @@
self._grids = None
self.quantities = DerivedQuantityCollection(self)
- def cut_region(self, field_cuts):
+ def cut_region(self, field_cuts, field_parameters = None):
"""
- Return an InLineExtractedRegion, where the grid cells are cut on the
- fly with a set of field_cuts. It is very useful for applying
- conditions to the fields in your data object.
+ Return an InLineExtractedRegion, where the object cells are cut on the
+ fly with a set of field_cuts. It is very useful for applying
+ conditions to the fields in your data object. Note that in previous
+ versions of yt, this accepted 'grid' as a variable, but presently it
+ requires 'obj'.
Examples
--------
@@ -763,19 +782,12 @@
>>> pf = load("RedshiftOutput0005")
>>> ad = pf.h.all_data()
- >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+ >>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
>>> print cr.quantities["TotalQuantity"]("CellMassMsun")
-
"""
- return YTValueCutExtractionBase(self, field_cuts)
-
- def extract_region(self, indices):
- """
- Return an ExtractedRegion where the points contained in it are defined
- as the points in `this` data object with the given *indices*.
- """
- fp = self.field_parameters.copy()
- return YTSelectedIndicesBase(self, indices, field_parameters = fp)
+ cr = self.pf.h.cut_region(self, field_cuts,
+ field_parameters = field_parameters)
+ return cr
def extract_isocontours(self, field, value, filename = None,
rescale = False, sample_values = None):
@@ -966,12 +978,15 @@
ff, mask, grid.LeftEdge, grid.dds)
def extract_connected_sets(self, field, num_levels, min_val, max_val,
- log_space=True, cumulative=True, cache=False):
+ log_space=True, cumulative=True):
"""
This function will create a set of contour objects, defined
by having connected cell structures, which can then be
studied and used to 'paint' their source grids, thus enabling
them to be plotted.
+
+ Note that this function *can* return a connected set object that has no
+ member values.
"""
if log_space:
cons = np.logspace(np.log10(min_val),np.log10(max_val),
@@ -979,8 +994,6 @@
else:
cons = np.linspace(min_val, max_val, num_levels+1)
contours = {}
- if cache: cached_fields = defaultdict(lambda: dict())
- else: cached_fields = None
for level in range(num_levels):
contours[level] = {}
if cumulative:
@@ -988,10 +1001,11 @@
else:
mv = cons[level+1]
from yt.analysis_modules.level_sets.api import identify_contours
- cids = identify_contours(self, field, cons[level], mv,
- cached_fields)
- for cid, cid_ind in cids.items():
- contours[level][cid] = self.extract_region(cid_ind)
+ nj, cids = identify_contours(self, field, cons[level], mv)
+ for cid in range(nj):
+ contours[level][cid] = self.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
return cons, contours
def paint_grids(self, field, value, default_value=None):
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -583,3 +583,85 @@
self.set_field_parameter('e0', e0)
self.set_field_parameter('e1', e1)
self.set_field_parameter('e2', e2)
+
+class YTCutRegionBase(YTSelectionContainer3D):
+ """
+ This is a data object designed to allow individuals to apply logical
+ operations to fields or particles and filter as a result of those cuts.
+
+ Parameters
+ ----------
+ base_object : YTSelectionContainer3D
+ The object to which cuts will be applied.
+ conditionals : list of strings
+ A list of conditionals that will be evaluated. In the namespace
+ available, these conditionals will have access to 'obj' which is a data
+ object of unknown shape, and they must generate a boolean array. For
+ instance, conditionals = ["obj['temperature'] < 1e3"]
+
+ Examples
+ --------
+
+ >>> pf = load("DD0010/moving7_0010")
+ >>> sp = pf.h.sphere("max", (1.0, 'mpc'))
+ >>> cr = pf.h.cut_region(sp, ["obj['temperature'] < 1e3"])
+ """
+ _type_name = "cut_region"
+ _con_args = ("base_object", "conditionals")
+ def __init__(self, base_object, conditionals, pf = None,
+ field_parameters = None):
+ super(YTCutRegionBase, self).__init__(base_object.center, pf, field_parameters)
+ self.conditionals = ensure_list(conditionals)
+ self.base_object = base_object
+ self._selector = None
+ # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
+ # ires and get_data
+
+ @property
+ def selector(self):
+ raise NotImplementedError
+
+ def chunks(self, fields, chunking_style, **kwargs):
+ # We actually want to chunk the sub-chunk, not ourselves. We have no
+ # chunks to speak of, as we do no data IO ourselves.
+ for chunk in self.hierarchy._chunk(self.base_object,
+ chunking_style,
+ **kwargs):
+ with self.base_object._chunked_read(chunk):
+ self.get_data(fields)
+ yield self
+
+ def get_data(self, fields = None):
+ fields = ensure_list(fields)
+ self.base_object.get_data(fields)
+ ind = self._cond_ind
+ for field in fields:
+ self.field_data[field] = self.base_object[field][ind]
+
+ @property
+ def _cond_ind(self):
+ ind = None
+ obj = self.base_object
+ with obj._field_parameter_state(self.field_parameters):
+ for cond in self.conditionals:
+ res = eval(cond)
+ if ind is None: ind = res
+ np.logical_and(res, ind, ind)
+ return ind
+
+ @property
+ def icoords(self):
+ return self.base_object.icoords[self._cond_ind,:]
+
+ @property
+ def fcoords(self):
+ return self.base_object.fcoords[self._cond_ind,:]
+
+ @property
+ def ires(self):
+ return self.base_object.ires[self._cond_ind]
+
+ @property
+ def fwidth(self):
+ return self.base_object.fwidth[self._cond_ind,:]
+
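The _cond_ind property above evaluates each conditional string with 'obj'
bound to the base object and ANDs the boolean results in place. A compact
sketch of that evaluation, using a plain dict as a stand-in data object:

    import numpy as np

    obj = {"Temperature": np.array([5.0e2, 2.0e3, 8.0e2]),
           "Density": np.array([0.5, 0.9, 0.2])}

    conditionals = ["obj['Temperature'] < 1e3",
                    "obj['Density'] > 0.3"]

    ind = None
    for cond in conditionals:
        res = eval(cond)
        if ind is None: ind = res
        np.logical_and(res, ind, ind)
    print ind  # [ True False False]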
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -6,15 +6,14 @@
def test_cut_region():
# We decompose in different ways
- return #TESTDISABLED
for nprocs in [1, 2, 4, 8]:
pf = fake_random_pf(64, nprocs = nprocs,
fields = ("Density", "Temperature", "x-velocity"))
# We'll test two objects
dd = pf.h.all_data()
- r = dd.cut_region( [ "grid['Temperature'] > 0.5",
- "grid['Density'] < 0.75",
- "grid['x-velocity'] > 0.25" ])
+ r = dd.cut_region( [ "obj['Temperature'] > 0.5",
+ "obj['Density'] < 0.75",
+ "obj['x-velocity'] > 0.25" ])
t = ( (dd["Temperature"] > 0.5 )
& (dd["Density"] < 0.75 )
& (dd["x-velocity"] > 0.25 ) )
@@ -23,33 +22,21 @@
yield assert_equal, np.all(r["x-velocity"] > 0.25), True
yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+ r2 = r.cut_region( [ "obj['Temperature'] < 0.75" ] )
t2 = (r["Temperature"] < 0.75)
yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
yield assert_equal, np.all(r2["Temperature"] < 0.75), True
-def test_extract_region():
- # We decompose in different ways
- return #TESTDISABLED
- for nprocs in [1, 2, 4, 8]:
- pf = fake_random_pf(64, nprocs = nprocs,
- fields = ("Density", "Temperature", "x-velocity"))
- # We'll test two objects
+ # Now we can test some projections
dd = pf.h.all_data()
- t = ( (dd["Temperature"] > 0.5 )
- & (dd["Density"] < 0.75 )
- & (dd["x-velocity"] > 0.25 ) )
- r = dd.extract_region(t)
- yield assert_equal, np.all(r["Temperature"] > 0.5), True
- yield assert_equal, np.all(r["Density"] < 0.75), True
- yield assert_equal, np.all(r["x-velocity"] > 0.25), True
- yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
- yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- t2 = (r["Temperature"] < 0.75)
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
- yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
- yield assert_equal, np.all(r2["Temperature"] < 0.75), True
- t3 = (r["Temperature"] < 0.75)
- r3 = r.extract_region( t3 )
- yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
- yield assert_equal, np.all(r3["Temperature"] < 0.75), True
+ cr = dd.cut_region(["obj['Ones'] > 0"])
+ for weight in [None, "Density"]:
+ p1 = pf.h.proj("Density", 0, data_source=dd, weight_field=weight)
+ p2 = pf.h.proj("Density", 0, data_source=cr, weight_field=weight)
+ for f in p1.field_data:
+ yield assert_almost_equal, p1[f], p2[f]
+ cr = dd.cut_region(["obj['Density'] > 0.25"])
+ p2 = pf.h.proj("Density", 2, data_source=cr)
+ yield assert_equal, p2["Density"].max() > 0.25, True
+ p2 = pf.h.proj("Density", 2, data_source=cr, weight_field = "Density")
+ yield assert_equal, p2["Density"].max() > 0.25, True
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -582,12 +582,18 @@
units=r"\rm{s}^{-1}")
def _Contours(field, data):
- return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
- display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
- validators=[ValidateSpatial(0), ValidateGridType()],
- take_log=False, display_field=False)
+ fd = data.get_field_parameter("contour_slices")
+ vals = data["Ones"] * -1
+ if fd is None or fd == 0.0:
+ return vals
+ for sl, v in fd.get(data.id, []):
+ vals[sl] = v
+ return vals
+add_field("Contours", validators=[ValidateSpatial(0)],
+ take_log=False,
+ display_field=False,
+ projection_conversion="1",
+ function=_Contours)
def obtain_velocities(data):
return obtain_rv_vec(data)
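
The reworked Contours field above paints precomputed contour IDs into an array that defaults to -1, using the "contour_slices" field parameter (grid id -> list of (slice, value) pairs). A rough NumPy-only illustration of that painting step, with invented ids and slices (in yt the value may be a scalar or an array shaped like the slice):

import numpy as np

grid_id = 7   # hypothetical grid id
contour_slices = {grid_id: [((slice(0, 2), slice(0, 2)), 1),
                            ((slice(2, 4), slice(2, 4)), 2)]}

vals = -np.ones((4, 4))                  # -1 means "in no contour"
for sl, v in contour_slices.get(grid_id, []):
    vals[sl] = v                         # paint this tile's contour ID
print(vals)
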
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -688,7 +688,7 @@
if len(self.queue) == 0: raise StopIteration
chunk = YTDataChunk(None, "cache", self.queue, cache=False)
self.cache = self.geometry_handler.io._read_chunk_data(
- chunk, self.preload_fields)
+ chunk, self.preload_fields) or {}
g = self.queue.pop(0)
g._initialize_cache(self.cache.pop(g.id, {}))
return g
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -273,7 +273,9 @@
giter = sorted(gobjs, key = lambda g: -g.Level)
elif sort is None:
giter = gobjs
- if self._preload_implemented and preload_fields is not None and ngz == 0:
+ if preload_fields is None: preload_fields = []
+ preload_fields, _ = self._split_fields(preload_fields)
+ if self._preload_implemented and len(preload_fields) > 0 and ngz == 0:
giter = ChunkDataCache(list(giter), preload_fields, self)
for i, og in enumerate(giter):
if ngz > 0:
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -184,6 +184,24 @@
for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
yield self.get_brick_data(node)
+ def slice_traverse(self, viewpoint = None):
+ if not hasattr(self.pf.h, "grid"):
+ raise NotImplementedError
+ for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+ grid = self.pf.h.grids[node.grid - self._id_offset]
+ dds = grid.dds
+ gle = grid.LeftEdge
+ nle = get_left_edge(node)
+ nre = get_right_edge(node)
+ li = np.rint((nle-gle)/dds).astype('int32')
+ ri = np.rint((nre-gle)/dds).astype('int32')
+ dims = (ri - li).astype('int32')
+ sl = (slice(li[0], ri[0]),
+ slice(li[1], ri[1]),
+ slice(li[2], ri[2]))
+ gi = grid.get_global_startindex() + li
+ yield grid, node, (sl, dims, gi)
+
def get_node(self, nodeid):
path = np.binary_repr(nodeid)
depth = 1
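
slice_traverse above converts each kd-node's physical bounds into integer index ranges on its host grid by dividing edge offsets by the cell width. The arithmetic in isolation, with invented numbers:

import numpy as np

dds = np.array([0.25, 0.25, 0.25])    # grid cell width
gle = np.array([0.0, 0.0, 0.0])       # grid left edge
nle = np.array([0.5, 0.0, 0.25])      # node left edge
nre = np.array([1.0, 0.5, 0.5])       # node right edge

li = np.rint((nle - gle) / dds).astype("int32")   # first cell index
ri = np.rint((nre - gle) / dds).astype("int32")   # one past the last
sl = (slice(li[0], ri[0]), slice(li[1], ri[1]), slice(li[2], ri[2]))
print(sl)   # (slice(2, 4), slice(0, 2), slice(1, 2))
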
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -444,19 +444,28 @@
if new_result is None:
return
assert(len(new_result) == len(old_result))
+ nind, oind = None, None
for k in new_result:
assert (k in old_result)
+ if oind is None:
+ oind = np.array(np.isnan(old_result[k]))
+ np.logical_or(oind, np.isnan(old_result[k]), oind)
+ if nind is None:
+ nind = np.array(np.isnan(new_result[k]))
+ np.logical_or(nind, np.isnan(new_result[k]), nind)
+ oind = ~oind
+ nind = ~nind
for k in new_result:
err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
(k, self.field, self.weight_field, self.axis)
if k == 'weight_field' and self.weight_field is None:
continue
+ nres, ores = new_result[k][nind], old_result[k][oind]
if self.decimals is None:
- assert_equal(new_result[k], old_result[k],
- err_msg=err_msg)
+ assert_equal(nres, ores, err_msg=err_msg)
else:
- assert_allclose(new_result[k], old_result[k],
- 10.**-(self.decimals), err_msg=err_msg)
+ assert_allclose(nres, ores, 10.**-(self.decimals),
+ err_msg=err_msg)
class PixelizedProjectionValuesTest(AnswerTestingTest):
_type_name = "PixelizedProjectionValues"
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,9 +18,17 @@
cimport cython
from libc.stdlib cimport malloc, free
+from amr_kdtools cimport _find_node, Node
+from grid_traversal cimport VolumeContainer, PartitionedGrid, \
+ vc_index, vc_pos_index
+
cdef extern from "math.h":
double fabs(double x)
+cdef extern from "stdlib.h":
+ # NOTE that size_t might not be int
+ void *alloca(int)
+
cdef inline np.int64_t i64max(np.int64_t i0, np.int64_t i1):
if i0 > i1: return i0
return i1
@@ -29,87 +37,407 @@
if i0 < i1: return i0
return i1
-cdef extern from "union_find.h":
- ctypedef struct forest_node:
- void *value
- forest_node *parent
- int rank
+cdef struct ContourID
- forest_node* MakeSet(void* value)
- void Union(forest_node* node1, forest_node* node2)
- forest_node* Find(forest_node* node)
+cdef struct ContourID:
+ np.int64_t contour_id
+ ContourID *parent
+ ContourID *next
+ ContourID *prev
-ctypedef struct CellIdentifier:
- np.int64_t hindex
- int level
+cdef ContourID *contour_create(np.int64_t contour_id,
+ ContourID *prev = NULL):
+ node = <ContourID *> malloc(sizeof(ContourID))
+ #print "Creating contour with id", contour_id
+ node.contour_id = contour_id
+ node.next = node.parent = NULL
+ node.prev = prev
+ if prev != NULL: prev.next = node
+ return node
-cdef class GridContourContainer:
- cdef np.int64_t dims[3]
- cdef np.int64_t start_indices[3]
- cdef forest_node **join_tree
- cdef np.int64_t ncells
+cdef void contour_delete(ContourID *node):
+ if node.prev != NULL: node.prev.next = node.next
+ if node.next != NULL: node.next.prev = node.prev
+ free(node)
- def __init__(self, dimensions, indices):
- cdef int i
- self.ncells = 1
- for i in range(3):
- self.ncells *= dimensions[i]
- self.dims[i] = dimensions[i]
- self.start_indices[i] = indices[i]
- self.join_tree = <forest_node **> malloc(sizeof(forest_node)
- * self.ncells)
- for i in range(self.ncells): self.join_tree[i] = NULL
+cdef ContourID *contour_find(ContourID *node):
+ cdef ContourID *temp, *root
+ root = node
+ while root.parent != NULL and root.parent != root:
+ root = root.parent
+ if root == root.parent: root.parent = NULL
+ while node.parent != NULL:
+ temp = node.parent
+ node.parent = root
+ node = temp
+ return root
+cdef void contour_union(ContourID *node1, ContourID *node2):
+ if node1.contour_id < node2.contour_id:
+ node2.parent = node1
+ elif node2.contour_id < node1.contour_id:
+ node1.parent = node2
+
+cdef struct CandidateContour
+
+cdef struct CandidateContour:
+ np.int64_t contour_id
+ np.int64_t join_id
+ CandidateContour *next
+
+cdef int candidate_contains(CandidateContour *first,
+ np.int64_t contour_id,
+ np.int64_t join_id = -1):
+ while first != NULL:
+ if first.contour_id == contour_id \
+ and first.join_id == join_id: return 1
+ first = first.next
+ return 0
+
+cdef CandidateContour *candidate_add(CandidateContour *first,
+ np.int64_t contour_id,
+ np.int64_t join_id = -1):
+ cdef CandidateContour *node
+ node = <CandidateContour *> malloc(sizeof(CandidateContour))
+ node.contour_id = contour_id
+ node.join_id = join_id
+ node.next = first
+ return node
+
+cdef class ContourTree:
+ # This class is essentially a Union-Find algorithm. What we want to do
+ # is, given a connection between two objects, identify the unique ID for
+ # those two objects. So what we have is a collection of contours, and they
+ # eventually all get joined and contain lots of individual IDs. But it's
+ # easy to find the *first* contour, i.e., the primary ID, for each of the
+ # subsequent IDs.
+ #
+ # This means that we can connect id 202483 to id 2472, and if id 2472 is
+ # connected to id 143, the connection will *actually* be from 202483 to
+ # 143. In this way we can speed up joining things and knowing their
+ # "canonical" id.
+ #
+ # This is a multi-step process, since we first want to connect all of the
+ # contours, then we end up wanting to coalesce them, and ultimately we join
+ # them at the end. The join produces a table that maps the initial to the
+ # final, and we can go through and just update all of those.
+ cdef ContourID *first
+ cdef ContourID *last
+
+ def clear(self):
+ # Here, we wipe out ALL of our contours, but not the pointers to them
+ cdef ContourID *cur, *next
+ cur = self.first
+ while cur != NULL:
+ next = cur.next
+ free(cur)
+ cur = next
+ self.first = self.last = NULL
+
+ def __init__(self):
+ self.first = self.last = NULL
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
+ # This adds new contours, from the given contour IDs, to the tree.
+ # Each one can be connected to a parent, as well as to next/prev in the
+ # set of contours belonging to this tree.
+ cdef int i, n
+ n = contour_ids.shape[0]
+ cdef ContourID *cur = self.last
+ for i in range(n):
+ #print i, contour_ids[i]
+ cur = contour_create(contour_ids[i], cur)
+ if self.first == NULL: self.first = cur
+ self.last = cur
+
+ def add_contour(self, np.int64_t contour_id):
+ self.last = contour_create(contour_id, self.last)
+
+ def cull_candidates(self, np.ndarray[np.int64_t, ndim=3] candidates):
+ # This function looks at each preliminary contour ID belonging to a
+ # given collection of values, and then if need be it creates a new
+ # contour for it.
+ cdef int i, j, k, ni, nj, nk, nc
+ cdef CandidateContour *first = NULL
+ cdef CandidateContour *temp
+ cdef np.int64_t cid
+ nc = 0
+ ni = candidates.shape[0]
+ nj = candidates.shape[1]
+ nk = candidates.shape[2]
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ cid = candidates[i,j,k]
+ if cid == -1: continue
+ if candidate_contains(first, cid) == 0:
+ nc += 1
+ first = candidate_add(first, cid)
+ cdef np.ndarray[np.int64_t, ndim=1] contours
+ contours = np.empty(nc, dtype="int64")
+ i = 0
+ # This removes all the temporary contours for this set of contours and
+ # instead constructs a final list of them.
+ while first != NULL:
+ contours[i] = first.contour_id
+ i += 1
+ temp = first.next
+ free(first)
+ first = temp
+ return contours
+
+ def cull_joins(self, np.ndarray[np.int64_t, ndim=2] cjoins):
+ # This coalesces contour IDs, so that we have only the final name
+ # resolutions -- the .join_id from a candidate. So many items will map
+ # to a single join_id.
+ cdef int i, j, k, ni, nj, nk, nc
+ cdef CandidateContour *first = NULL
+ cdef CandidateContour *temp
+ cdef np.int64_t cid1, cid2
+ nc = 0
+ ni = cjoins.shape[0]
+ for i in range(ni):
+ cid1 = cjoins[i,0]
+ cid2 = cjoins[i,1]
+ if cid1 == -1: continue
+ if cid2 == -1: continue
+ if candidate_contains(first, cid1, cid2) == 0:
+ nc += 1
+ first = candidate_add(first, cid1, cid2)
+ cdef np.ndarray[np.int64_t, ndim=2] contours
+ contours = np.empty((nc,2), dtype="int64")
+ i = 0
+ while first != NULL:
+ contours[i,0] = first.contour_id
+ contours[i,1] = first.join_id
+ i += 1
+ temp = first.next
+ free(first)
+ first = temp
+ return contours
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
+ cdef int i, n, ins
+ cdef np.int64_t cid1, cid2
+ # Okay, this requires lots of iteration, unfortunately
+ cdef ContourID *cur, *root
+ cdef ContourID *c1, *c2
+ n = join_tree.shape[0]
+ #print "Counting"
+ #print "Checking", self.count()
+ for i in range(n):
+ ins = 0
+ cid1 = join_tree[i, 0]
+ cid2 = join_tree[i, 1]
+ c1 = c2 = NULL
+ cur = self.first
+ #print "Looking for ", cid1, cid2
+ while c1 == NULL or c2 == NULL:
+ if cur.contour_id == cid1:
+ c1 = contour_find(cur)
+ if cur.contour_id == cid2:
+ c2 = contour_find(cur)
+ ins += 1
+ cur = cur.next
+ if cur == NULL: break
+ if c1 == NULL or c2 == NULL:
+ if c1 == NULL: print " Couldn't find ", cid1
+ if c2 == NULL: print " Couldn't find ", cid2
+ print " Inspected ", ins
+ raise RuntimeError
+ else:
+ contour_union(c1, c2)
+
+ def count(self):
+ cdef int n = 0
+ cdef ContourID *cur = self.first
+ while cur != NULL:
+ cur = cur.next
+ n += 1
+ return n
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def export(self):
+ cdef int n = self.count()
+ cdef ContourID *cur, *root
+ cur = self.first
+ cdef np.ndarray[np.int64_t, ndim=2] joins
+ joins = np.empty((n, 2), dtype="int64")
+ n = 0
+ while cur != NULL:
+ root = contour_find(cur)
+ joins[n, 0] = cur.contour_id
+ joins[n, 1] = root.contour_id
+ cur = cur.next
+ n += 1
+ return joins
+
def __dealloc__(self):
- cdef int i
- for i in range(self.ncells):
- if self.join_tree[i] != NULL: free(self.join_tree[i])
- free(self.join_tree)
+ self.clear()
- #def construct_join_tree(self,
- # np.ndarray[np.float64_t, ndim=3] field,
- # np.ndarray[np.bool_t, ndim=3] mask):
- # # This only looks at the components of the grid that are actually
- # # inside this grid -- boundary conditions are handled later.
- # pass
+cdef class TileContourTree:
+ cdef np.float64_t min_val
+ cdef np.float64_t max_val
-#@cython.boundscheck(False)
-#@cython.wraparound(False)
-def construct_boundary_relationships(
- np.ndarray[dtype=np.int64_t, ndim=3] contour_ids):
- # We only look at the boundary and one cell in
- cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj
+ def __init__(self, np.float64_t min_val, np.float64_t max_val):
+ self.min_val = min_val
+ self.max_val = max_val
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def identify_contours(self, np.ndarray[np.float64_t, ndim=3] values,
+ np.ndarray[np.int64_t, ndim=3] contour_ids,
+ np.int64_t start):
+ # This just looks at neighbor values and tries to identify which zones
+ # are touching by face within a given brick.
+ cdef int i, j, k, ni, nj, nk, offset
+ cdef int off_i, off_j, off_k, oi, ok, oj
+ cdef ContourID *cur = NULL
+ cdef ContourID *c1, *c2
+ cdef np.float64_t v
+ cdef np.int64_t nc
+ ni = values.shape[0]
+ nj = values.shape[1]
+ nk = values.shape[2]
+ nc = 0
+ cdef ContourID **container = <ContourID**> malloc(
+ sizeof(ContourID*)*ni*nj*nk)
+ for i in range(ni*nj*nk): container[i] = NULL
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ v = values[i,j,k]
+ if v < self.min_val or v > self.max_val: continue
+ nc += 1
+ c1 = contour_create(nc + start)
+ cur = container[i*nj*nk + j*nk + k] = c1
+ for oi in range(3):
+ off_i = oi - 1 + i
+ if not (0 <= off_i < ni): continue
+ for oj in range(3):
+ off_j = oj - 1 + j
+ if not (0 <= off_j < nj): continue
+ for ok in range(3):
+ if oi == oj == ok == 1: continue
+ off_k = ok - 1 + k
+ if not (0 <= off_k < nk): continue
+ if off_i > i and off_j > j and off_k > k:
+ continue
+ offset = off_i*nj*nk + off_j*nk + off_k
+ c2 = container[offset]
+ if c2 == NULL: continue
+ c2 = contour_find(c2)
+ contour_union(cur, c2)
+ cur = contour_find(cur)
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ c1 = container[i*nj*nk + j*nk + k]
+ if c1 == NULL: continue
+ cur = c1
+ c1 = contour_find(c1)
+ contour_ids[i,j,k] = c1.contour_id
+
+ for i in range(ni*nj*nk):
+ if container[i] != NULL: free(container[i])
+ free(container)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def link_node_contours(Node trunk, contours, ContourTree tree,
+ np.ndarray[np.int64_t, ndim=1] node_ids):
+ cdef int n_nodes = node_ids.shape[0]
+ cdef np.int64_t node_ind
+ cdef VolumeContainer **vcs = <VolumeContainer **> malloc(
+ sizeof(VolumeContainer*) * n_nodes)
+ cdef int i
+ cdef PartitionedGrid pg
+ for i in range(n_nodes):
+ pg = contours[node_ids[i]][2]
+ vcs[i] = pg.container
+ cdef np.ndarray[np.uint8_t] examined = np.zeros(n_nodes, "uint8")
+ for nid, cinfo in sorted(contours.items(), key = lambda a: -a[1][0]):
+ level, node_ind, pg, sl = cinfo
+ construct_boundary_relationships(trunk, tree, node_ind,
+ examined, vcs, node_ids)
+ examined[node_ind] = 1
+
+cdef inline void get_spos(VolumeContainer *vc, int i, int j, int k,
+ int axis, np.float64_t *spos):
+ spos[0] = vc.left_edge[0] + i * vc.dds[0]
+ spos[1] = vc.left_edge[1] + j * vc.dds[1]
+ spos[2] = vc.left_edge[2] + k * vc.dds[2]
+ spos[axis] += 0.5 * vc.dds[axis]
+
+cdef inline int spos_contained(VolumeContainer *vc, np.float64_t *spos):
+ cdef int i
+ for i in range(3):
+ if spos[i] < vc.left_edge[i] or spos[i] > vc.right_edge[i]: return 0
+ return 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void construct_boundary_relationships(Node trunk, ContourTree tree,
+ np.int64_t nid, np.ndarray[np.uint8_t, ndim=1] examined,
+ VolumeContainer **vcs,
+ np.ndarray[np.int64_t, ndim=1] node_ids):
+ # We only look at the boundary and find the nodes next to it.
+ # Contours is a dict, keyed by the node.id.
+ cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
cdef np.int64_t c1, c2
- nx = contour_ids.shape[0]
- ny = contour_ids.shape[1]
- nz = contour_ids.shape[2]
+ cdef Node adj_node
+ cdef VolumeContainer *vc1, *vc0 = vcs[nid]
+ nx = vc0.dims[0]
+ ny = vc0.dims[1]
+ nz = vc0.dims[2]
+ cdef int s = (ny*nx + nx*nz + ny*nz) * 18
# We allocate an array of fixed (maximum) size
- cdef int s = (ny*nx + nx*nz + ny*nz - 2) * 18
- cdef np.ndarray[np.int64_t, ndim=2] tree = np.zeros((s, 2), dtype="int64")
+ cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
cdef int ti = 0
- # First x-pass
+ cdef int index
+ cdef np.float64_t spos[3]
+
+ # First the x-pass
for i in range(ny):
for j in range(nz):
for offset_i in range(3):
oi = offset_i - 1
- if i == 0 and oi == -1: continue
- if i == ny - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == nz - 1 and oj == 1: continue
- c1 = contour_ids[0, i, j]
- c2 = contour_ids[1, i + oi, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[nx-1, i, j]
- c2 = contour_ids[nx-2, i + oi, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
+ # Adjust by -1 in x, then oi and oj in y and z
+ get_spos(vc0, -1, i + oi, j + oj, 0, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, 0, i, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ # This is outside our vc
+ get_spos(vc0, nx, i + oi, j + oj, 0, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, nx - 1, i, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
# Now y-pass
for i in range(nx):
for j in range(nz):
@@ -119,43 +447,75 @@
if i == nx - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == nz - 1 and oj == 1: continue
- c1 = contour_ids[i, 0, j]
- c2 = contour_ids[i + oi, 1, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[i, ny-1, j]
- c2 = contour_ids[i + oi, ny-2, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
+ get_spos(vc0, i + oi, -1, j + oj, 1, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, 0, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ get_spos(vc0, i + oi, ny, j + oj, 1, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, ny - 1, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ # Now z-pass
for i in range(nx):
for j in range(ny):
for offset_i in range(3):
oi = offset_i - 1
- if i == 0 and oi == -1: continue
- if i == nx - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == ny - 1 and oj == 1: continue
- c1 = contour_ids[i, j, 0]
- c2 = contour_ids[i + oi, j + oj, 1]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[i, j, nz-1]
- c2 = contour_ids[i + oi, j + oj, nz-2]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- return tree[:ti,:]
+ get_spos(vc0, i + oi, j + oj, -1, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, 0)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ get_spos(vc0, i + oi, j + oj, nz, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, nz - 1)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ if ti == 0: return
+ new_joins = tree.cull_joins(joins[:ti,:])
+ tree.add_joins(new_joins)
cdef inline int are_neighbors(
np.float64_t x1, np.float64_t y1, np.float64_t z1,
@@ -228,16 +588,23 @@
@cython.boundscheck(False)
@cython.wraparound(False)
-def update_joins(joins, np.ndarray[np.int64_t, ndim=1] contour_ids):
- cdef np.int64_t new, old, i, oi
- cdef int n, on
- cdef np.ndarray[np.int64_t, ndim=1] old_set
- #print contour_ids.shape[0]
- n = contour_ids.shape[0]
- for new, old_set in joins:
- #print new
- on = old_set.shape[0]
- for i in range(n):
- for oi in range(on):
- old = old_set[oi]
- if contour_ids[i] == old: contour_ids[i] = new
+def update_joins(np.ndarray[np.int64_t, ndim=2] joins,
+ np.ndarray[np.int64_t, ndim=3] contour_ids,
+ np.ndarray[np.int64_t, ndim=1] final_joins):
+ cdef np.int64_t new, old
+ cdef int i, j, nj, nf
+ cdef int ci, cj, ck
+ nj = joins.shape[0]
+ nf = final_joins.shape[0]
+ for ci in range(contour_ids.shape[0]):
+ for cj in range(contour_ids.shape[1]):
+ for ck in range(contour_ids.shape[2]):
+ if contour_ids[ci,cj,ck] == -1: continue
+ for j in range(nj):
+ if contour_ids[ci,cj,ck] == joins[j,0]:
+ contour_ids[ci,cj,ck] = joins[j,1]
+ break
+ for j in range(nf):
+ if contour_ids[ci,cj,ck] == final_joins[j]:
+ contour_ids[ci,cj,ck] = j + 1
+ break
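
ContourTree above is a linked-list union-find: contour_find walks to the root and then path-compresses, and contour_union links whichever node has the larger contour ID under the smaller one, so the smallest ID in a connected set becomes canonical. The same logic as a pure-Python sketch (node class invented):

class CNode(object):
    def __init__(self, cid):
        self.contour_id = cid
        self.parent = None

def find(node):
    root = node                      # first pass: locate the root
    while root.parent is not None and root.parent is not root:
        root = root.parent
    while node.parent is not None:   # second pass: path compression
        nxt = node.parent
        node.parent = root
        node = nxt
    return root

def union(n1, n2):
    if n1.contour_id < n2.contour_id:    # smaller ID wins
        n2.parent = n1
    elif n2.contour_id < n1.contour_id:
        n1.parent = n2

a, b, c = CNode(143), CNode(2472), CNode(202483)
union(b, c)                  # 202483 joins 2472
union(find(c), a)            # ...which in turn joins 143
print(find(c).contour_id)    # 143
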
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/utilities/lib/amr_kdtools.pxd
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -0,0 +1,39 @@
+"""
+AMR kD-Tree Cython Tools
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+
+cdef struct Split:
+ int dim
+ np.float64_t pos
+
+cdef class Node
+
+cdef class Node:
+ cdef public Node left
+ cdef public Node right
+ cdef public Node parent
+ cdef public int grid
+ cdef public np.int64_t node_id
+ cdef public np.int64_t node_ind
+ cdef np.float64_t left_edge[3]
+ cdef np.float64_t right_edge[3]
+ cdef public data
+ cdef Split * split
+ cdef int level
+
+cdef int point_in_node(Node node, np.ndarray[np.float64_t, ndim=1] point)
+cdef Node _find_node(Node node, np.float64_t *point)
+cdef int _kd_is_leaf(Node node)
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -25,25 +25,11 @@
DEF Nch = 4
-cdef struct Split:
- int dim
- np.float64_t pos
-
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cdef class Node:
- cdef public Node left
- cdef public Node right
- cdef public Node parent
- cdef public int grid
- cdef public np.int64_t node_id
- cdef np.float64_t left_edge[3]
- cdef np.float64_t right_edge[3]
- cdef public data
- cdef Split * split
-
def __cinit__(self,
Node parent,
Node left,
@@ -152,11 +138,11 @@
def kd_traverse(Node trunk, viewpoint=None):
if viewpoint is None:
for node in depth_traverse(trunk):
- if kd_is_leaf(node) and node.grid != -1:
+ if _kd_is_leaf(node) == 1 and node.grid != -1:
yield node
else:
for node in viewpoint_traverse(trunk, viewpoint):
- if kd_is_leaf(node) and node.grid != -1:
+ if _kd_is_leaf(node) == 1 and node.grid != -1:
yield node
@cython.boundscheck(False)
@@ -172,7 +158,7 @@
if not should_i_build(node, rank, size):
return
- if kd_is_leaf(node):
+ if _kd_is_leaf(node) == 1:
insert_grid(node, gle, gre, gid, rank, size)
else:
less_id = gle[node.split.dim] < node.split.pos
@@ -295,7 +281,7 @@
if not should_i_build(node, rank, size):
return
- if kd_is_leaf(node):
+ if _kd_is_leaf(node) == 1:
insert_grids(node, ngrids, gles, gres, gids, rank, size)
return
@@ -766,11 +752,16 @@
assert has_l_child == has_r_child
return has_l_child
+cdef int _kd_is_leaf(Node node):
+ if node.left is None or node.right is None:
+ return 1
+ return 0
+
def step_depth(Node current, Node previous):
'''
Takes a single step in the depth-first traversal
'''
- if kd_is_leaf(current): # At a leaf, move back up
+ if _kd_is_leaf(current) == 1: # At a leaf, move back up
previous = current
current = current.parent
@@ -862,7 +853,7 @@
Takes a single step in the viewpoint based traversal. Always
goes to the node furthest away from viewpoint first.
'''
- if kd_is_leaf(current): # At a leaf, move back up
+ if _kd_is_leaf(current) == 1: # At a leaf, move back up
previous = current
current = current.parent
elif current.split.dim is None: # This is a dead node
@@ -913,6 +904,13 @@
inside *= node.right_edge[i] > point[i]
return inside
+cdef Node _find_node(Node node, np.float64_t *point):
+ while _kd_is_leaf(node) == 0:
+ if point[node.split.dim] < node.split.pos:
+ node = node.left
+ else:
+ node = node.right
+ return node
def find_node(Node node,
np.ndarray[np.float64_t, ndim=1] point):
@@ -920,12 +918,5 @@
Find the AMRKDTree node enclosing a position
"""
assert(point_in_node(node, point))
- while not kd_is_leaf(node):
- if point[node.split.dim] < node.split.pos:
- node = node.left
- else:
- node = node.right
- return node
+ return _find_node(node, <np.float64_t *> point.data)
-
-
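
_find_node above is a standard kd-tree point-location walk: while the node has children, compare the point against the split position along the split dimension and descend left or right. A pure-Python sketch with an invented node layout:

class KDNode(object):
    def __init__(self, dim=None, pos=None, left=None, right=None, name=""):
        self.dim, self.pos = dim, pos
        self.left, self.right, self.name = left, right, name

def find_node(node, point):
    # A node is a leaf when either child is missing (_kd_is_leaf).
    while node.left is not None and node.right is not None:
        node = node.left if point[node.dim] < node.pos else node.right
    return node

tree = KDNode(0, 0.5,
              left=KDNode(name="x<0.5"),
              right=KDNode(1, 0.5,
                           left=KDNode(name="x>=0.5, y<0.5"),
                           right=KDNode(name="x>=0.5, y>=0.5")))
print(find_node(tree, (0.7, 0.2, 0.9)).name)   # x>=0.5, y<0.5
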
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -17,6 +17,7 @@
import numpy as np
cimport numpy as np
cimport cython
+cimport kdtree_utils
cdef struct VolumeContainer:
int n_fields
@@ -29,6 +30,20 @@
np.float64_t idds[3]
int dims[3]
+cdef class PartitionedGrid:
+ cdef public object my_data
+ cdef public object source_mask
+ cdef public object LeftEdge
+ cdef public object RightEdge
+ cdef public int parent_grid_id
+ cdef VolumeContainer *container
+ cdef kdtree_utils.kdtree *star_list
+ cdef np.float64_t star_er
+ cdef np.float64_t star_sigma_num
+ cdef np.float64_t star_coeff
+ cdef void get_vector_field(self, np.float64_t pos[3],
+ np.float64_t *vel, np.float64_t *vel_mag)
+
ctypedef void sample_function(
VolumeContainer *vc,
np.float64_t v_pos[3],
@@ -45,3 +60,12 @@
void *data,
np.float64_t *return_t = *,
np.float64_t enter_t = *) nogil
+
+cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
+ return (i*vc.dims[1]+j)*vc.dims[2]+k
+
+cdef inline int vc_pos_index(VolumeContainer *vc, np.float64_t *spos):
+ cdef int i, index[3]
+ for i in range(3):
+ index[i] = <int> ((spos[i] - vc.left_edge[i]) * vc.idds[i])
+ return vc_index(vc, index[0], index[1], index[2])
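
vc_index and vc_pos_index above are the usual row-major flattening helpers: cell (i, j, k) in an (nx, ny, nz) brick lives at (i*ny + j)*nz + k, and a physical position is converted to indices with the precomputed inverse cell widths. In Python form, with invented dimensions:

import numpy as np

dims = (4, 3, 2)                     # nx, ny, nz
left_edge = np.zeros(3)
idds = np.array([4.0, 3.0, 2.0])     # 1/dds for a unit domain

def vc_index(i, j, k):
    return (i * dims[1] + j) * dims[2] + k

def vc_pos_index(spos):
    i, j, k = ((np.asarray(spos) - left_edge) * idds).astype(int)
    return vc_index(i, j, k)

print(vc_index(1, 2, 1))             # (1*3 + 2)*2 + 1 = 11
print(vc_pos_index((0.3, 0.9, 0.6))) # lands in cell (1, 2, 1) -> 11
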
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -16,7 +16,6 @@
import numpy as np
cimport numpy as np
cimport cython
-cimport kdtree_utils
#cimport healpix_interface
from libc.stdlib cimport malloc, free, abs
from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -58,16 +57,6 @@
void *data) nogil
cdef class PartitionedGrid:
- cdef public object my_data
- cdef public object source_mask
- cdef public object LeftEdge
- cdef public object RightEdge
- cdef public int parent_grid_id
- cdef VolumeContainer *container
- cdef kdtree_utils.kdtree *star_list
- cdef np.float64_t star_er
- cdef np.float64_t star_sigma_num
- cdef np.float64_t star_coeff
@cython.boundscheck(False)
@cython.wraparound(False)
diff -r d0546b0492408854ce4e133c0c2aed4a5a15ce75 -r 38fff02747cadf3674047f662f325756143dd225 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -65,7 +65,9 @@
["yt/utilities/lib/ContourFinding.pyx",
"yt/utilities/lib/union_find.c"],
include_dirs=["yt/utilities/lib/"],
- libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+ libraries=["m"],
+ depends=["yt/utilities/lib/fp_utils.pxd",
+ "yt/utilities/lib/amr_kdtools.pxd"])
config.add_extension("DepthFirstOctree",
["yt/utilities/lib/DepthFirstOctree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
https://bitbucket.org/yt_analysis/yt/commits/4a6cd04dadf2/
Changeset: 4a6cd04dadf2
Branch: yt-3.0
User: MatthewTurk
Date: 2013-12-02 03:31:11
Summary: Merging heads
Affected #: 4 files
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
if isinstance(arg, types.StringTypes):
if os.path.exists(arg):
valid_file.append(True)
+ elif arg.startswith("http"):
+ valid_file.append(True)
else:
if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
valid_file.append(True)
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
- if not os.path.exists(apath): raise IOError(filename)
+ #if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
if obj._skip_cache is False:
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -20,6 +20,7 @@
import weakref
import struct
import glob
+import time
import os
from yt.utilities.fortran_utils import read_record
@@ -50,6 +51,11 @@
particle_deposition_functions, \
standard_particle_fields
+try:
+ import requests
+ import json
+except ImportError:
+ requests = None
class ParticleFile(object):
def __init__(self, pf, io, filename, file_id):
@@ -562,3 +568,79 @@
def _is_valid(self, *args, **kwargs):
# We do not allow load() of these files.
return False
+
+class HTTPParticleFile(ParticleFile):
+ pass
+
+class HTTPStreamStaticOutput(ParticleStaticOutput):
+ _hierarchy_class = ParticleGeometryHandler
+ _file_class = HTTPParticleFile
+ _fieldinfo_fallback = GadgetFieldInfo
+ _fieldinfo_known = KnownGadgetFields
+ _particle_mass_name = "Mass"
+ _particle_coordinates_name = "Coordinates"
+ _particle_velocity_name = "Velocities"
+ filename_template = ""
+
+ def __init__(self, base_url,
+ data_style = "http_particle_stream",
+ n_ref = 64, over_refine_factor=1):
+ if requests is None:
+ raise RuntimeError
+ self.base_url = base_url
+ self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
+ super(HTTPStreamStaticOutput, self).__init__("", data_style)
+
+ def __repr__(self):
+ return self.base_url
+
+ def _parse_parameter_file(self):
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.parameters["HydroMethod"] = "sph"
+
+ # Here's where we're going to grab the JSON index file
+ hreq = requests.get(self.base_url + "/yt_index.json")
+ if hreq.status_code != 200:
+ raise RuntimeError
+ header = json.loads(hreq.content)
+ header['particle_count'] = dict((int(k), header['particle_count'][k])
+ for k in header['particle_count'])
+ self.parameters = header
+
+ # Now we get what we need
+ self.domain_left_edge = np.array(header['domain_left_edge'], "float64")
+ self.domain_right_edge = np.array(header['domain_right_edge'], "float64")
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
+ self.periodicity = (True, True, True)
+
+ self.current_time = header['current_time']
+ self.unique_identifier = header.get("unique_identifier", time.time())
+ self.cosmological_simulation = int(header['cosmological_simulation'])
+ for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
+ 'hubble_constant'):
+ setattr(self, attr, float(header[attr]))
+
+ self.file_count = header['num_files']
+
+ def _set_units(self):
+ length_unit = float(self.parameters['units']['length'])
+ time_unit = float(self.parameters['units']['time'])
+ mass_unit = float(self.parameters['units']['mass'])
+ density_unit = mass_unit / length_unit ** 3
+ velocity_unit = length_unit / time_unit
+ self._unit_base = {}
+ self._unit_base['cm'] = 1.0/length_unit
+ self._unit_base['s'] = 1.0/time_unit
+ super(HTTPStreamStaticOutput, self)._set_units()
+ self.conversion_factors["velocity"] = velocity_unit
+ self.conversion_factors["mass"] = mass_unit
+ self.conversion_factors["density"] = density_unit
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ if args[0].startswith("http://"):
+ return True
+ return False
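
The frontend above bootstraps itself from a yt_index.json document served at the dataset's base URL. A sketch of the handshake it performs, against a hypothetical server and with a trimmed-down header (key names follow _parse_parameter_file above):

import json
import requests

base_url = "http://example.org/dataset"          # hypothetical
resp = requests.get(base_url + "/yt_index.json")
if resp.status_code != 200:
    raise RuntimeError("no yt_index.json at %s" % base_url)
header = json.loads(resp.content)
# Among the keys read above: domain_left_edge, domain_right_edge,
# current_time, num_files, particle_count, units, field_list.
print(header["num_files"], header["units"]["length"])
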
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -28,6 +28,11 @@
from yt.geometry.oct_container import _ORDER_MAX
+try:
+ import requests
+except ImportError:
+ requests = None
+
CHUNKSIZE = 10000000
def _get_h5_handle(fn):
@@ -543,3 +548,90 @@
size = self._pdtypes[ptype].itemsize
pos += data_file.total_particles[ptype] * size
return field_offsets
+
+class IOHandlerHTTPStream(BaseIOHandler):
+ _data_style = "http_particle_stream"
+ _vector_fields = ("Coordinates", "Velocity", "Velocities")
+
+ def __init__(self, pf):
+ if requests is None:
+ raise RuntimeError
+ self._url = pf.base_url
+ # This should eventually manage the IO and cache it
+ self.total_bytes = 0
+ super(IOHandlerHTTPStream, self).__init__(pf)
+
+ def _open_stream(self, data_file, field):
+ # This does not actually stream yet!
+ ftype, fname = field
+ s = "%s/%s/%s/%s" % (self._url,
+ data_file.file_id, ftype, fname)
+ mylog.info("Loading URL %s", s)
+ resp = requests.get(s)
+ if resp.status_code != 200:
+ raise RuntimeError
+ self.total_bytes += len(resp.content)
+ return resp.content
+
+ def _identify_fields(self, data_file):
+ f = []
+ for ftype, fname in self.pf.parameters["field_list"]:
+ f.append((str(ftype), str(fname)))
+ return f
+
+ def _read_particle_coords(self, chunks, ptf):
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype in ptf:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]//3, 3)
+ yield ptype, (c[:,0], c[:,1], c[:,2])
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype, field_list in sorted(ptf.items()):
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]//3, 3)
+ mask = selector.select_points(
+ c[:,0], c[:,1], c[:,2])
+ del c
+ if mask is None: continue
+ for field in field_list:
+ s = self._open_stream(data_file, (ptype, field))
+ c = np.frombuffer(s, dtype="float64")
+ if field in self._vector_fields:
+ c.shape = (c.shape[0]//3, 3)
+ data = c[mask, ...]
+ yield (ptype, field), data
+
+ def _initialize_index(self, data_file, regions):
+ header = self.pf.parameters
+ ptypes = header["particle_count"][data_file.file_id].keys()
+ pcount = sum(header["particle_count"][data_file.file_id].values())
+ morton = np.empty(pcount, dtype='uint64')
+ ind = 0
+ for ptype in ptypes:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]//3, 3)
+ regions.add_data_file(c, data_file.file_id)
+ morton[ind:ind+c.shape[0]] = compute_morton(
+ c[:,0], c[:,1], c[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ ind += c.shape[0]
+ return morton
+
+ def _count_particles(self, data_file):
+ return self.pf.parameters["particle_count"][data_file.file_id]
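
Every field request in IOHandlerHTTPStream comes back as a raw float64 byte buffer; vector fields are reshaped to (N, 3) and then filtered by the selector mask. The decode step in isolation, with an invented payload and a stand-in for the selector:

import numpy as np

raw = np.arange(12, dtype="float64").tostring()  # pretend HTTP body
c = np.frombuffer(raw, dtype="float64")
c = c.reshape((c.shape[0] // 3, 3))              # one row per particle
mask = c[:, 0] > 3.0                             # stand-in for select_points
print(c[mask, ...])                              # rows [6 7 8] and [9 10 11]
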
https://bitbucket.org/yt_analysis/yt/commits/761425f895bc/
Changeset: 761425f895bc
Branch: yt-3.0
User: MatthewTurk
Date: 2013-12-02 03:31:34
Summary: Merging contour updates.
Affected #: 18 files
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -14,7 +14,6 @@
#-----------------------------------------------------------------------------
from .contour_finder import \
- coalesce_join_tree, \
identify_contours
from .clump_handling import \
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -107,10 +107,11 @@
print "Wiping out existing children clumps."
self.children = []
if max_val is None: max_val = self.max_val
- contour_info = identify_contours(self.data, self.field, min_val, max_val,
- self.cached_fields)
- for cid in contour_info:
- new_clump = self.data.extract_region(contour_info[cid])
+ nj, cids = identify_contours(self.data, self.field, min_val, max_val)
+ for cid in range(nj):
+ new_clump = self.data.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
self.children.append(Clump(new_clump, self, self.field,
self.cached_fields,function=self.function,
clump_info=self.clump_info))
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -20,120 +20,52 @@
import yt.utilities.data_point_utilities as data_point_utilities
import yt.utilities.lib as amr_utils
-def coalesce_join_tree(jtree1):
- joins = defaultdict(set)
- nj = jtree1.shape[0]
- for i1 in range(nj):
- current_new = jtree1[i1, 0]
- current_old = jtree1[i1, 1]
- for i2 in range(nj):
- if jtree1[i2, 1] == current_new:
- current_new = max(current_new, jtree1[i2, 0])
- jtree1[i1, 0] = current_new
- for i1 in range(nj):
- joins[jtree1[i1, 0]].update([jtree1[i1, 1], jtree1[i1, 0]])
- updated = -1
- while updated != 0:
- keys = list(reversed(sorted(joins.keys())))
- updated = 0
- for k1 in keys + keys[::-1]:
- if k1 not in joins: continue
- s1 = joins[k1]
- for k2 in keys + keys[::-1]:
- if k2 >= k1: continue
- if k2 not in joins: continue
- s2 = joins[k2]
- if k2 in s1:
- s1.update(joins.pop(k2))
- updated += 1
- elif not s1.isdisjoint(s2):
- s1.update(joins.pop(k2))
- s1.update([k2])
- updated += 1
- tr = []
- for k in joins.keys():
- v = joins.pop(k)
- tr.append((k, np.array(list(v), dtype="int64")))
- return tr
-
def identify_contours(data_source, field, min_val, max_val,
cached_fields=None):
- cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
- pbar = get_pbar("First pass", len(data_source._grids))
- grids = sorted(data_source._grids, key=lambda g: -g.Level)
+ tree = amr_utils.ContourTree()
+ gct = amr_utils.TileContourTree(min_val, max_val)
total_contours = 0
- tree = []
- for gi,grid in enumerate(grids):
- pbar.update(gi+1)
- cm = data_source._get_cut_mask(grid)
- if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
- old_field_parameters = grid.field_parameters
- grid.field_parameters = data_source.field_parameters
- local_ind = np.where( (grid[field] > min_val)
- & (grid[field] < max_val) & cm )
- grid.field_parameters = old_field_parameters
- if local_ind[0].size == 0: continue
- kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
- grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
- grid["tempContours"][local_ind] = kk[:]
- cur_max_id -= local_ind[0].size
- xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
- cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
- fd_orig = grid["tempContours"].copy()
- xi = xi_u[cor_order]
- yi = yi_u[cor_order]
- zi = zi_u[cor_order]
- while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
- pass
- total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
- new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
- tree += zip(new_contours, new_contours)
- tree = set(tree)
+ contours = {}
+ empty_mask = np.ones((1,1,1), dtype="uint8")
+ node_ids = []
+ for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+ node.node_ind = len(node_ids)
+ nid = node.node_id
+ node_ids.append(nid)
+ values = g[field][sl].astype("float64")
+ contour_ids = np.zeros(dims, "int64") - 1
+ gct.identify_contours(values, contour_ids, total_contours)
+ new_contours = tree.cull_candidates(contour_ids)
+ total_contours += new_contours.shape[0]
+ tree.add_contours(new_contours)
+ # Now we can create a partitioned grid with the contours.
+ pg = amr_utils.PartitionedGrid(g.id,
+ [contour_ids.view("float64")],
+ empty_mask, g.dds * gi, g.dds * (gi + dims),
+ dims.astype("int64"))
+ contours[nid] = (g.Level, node.node_ind, pg, sl)
+ node_ids = np.array(node_ids)
+ trunk = data_source.tiles.tree.trunk
+ mylog.info("Linking node (%s) contours.", len(contours))
+ amr_utils.link_node_contours(trunk, contours, tree, node_ids)
+ mylog.info("Linked.")
+ #joins = tree.cull_joins(bt)
+ #tree.add_joins(joins)
+ joins = tree.export()
+ contour_ids = defaultdict(list)
+ pbar = get_pbar("Updating joins ... ", len(contours))
+ final_joins = np.unique(joins[:,1])
+ for i, nid in enumerate(sorted(contours)):
+ level, node_ind, pg, sl = contours[nid]
+ ff = pg.my_data[0].view("int64")
+ amr_utils.update_joins(joins, ff, final_joins)
+ contour_ids[pg.parent_grid_id].append((sl, ff))
+ pbar.update(i)
pbar.finish()
- pbar = get_pbar("Calculating joins ", len(data_source._grids))
- grid_set = set()
- for gi,grid in enumerate(grids):
- pbar.update(gi)
- cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
- grid_set.update(set(cg._grids))
- fd = cg["tempContours"].astype('int64')
- boundary_tree = amr_utils.construct_boundary_relationships(fd)
- tree.update(((a, b) for a, b in boundary_tree))
- pbar.finish()
- sort_new = np.array(list(tree), dtype='int64')
- mylog.info("Coalescing %s joins", sort_new.shape[0])
- joins = coalesce_join_tree(sort_new)
- #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
- pbar = get_pbar("Joining ", len(joins))
- # This process could and should be done faster
- print "Joining..."
- t1 = time.time()
- ff = data_source["tempContours"].astype("int64")
- amr_utils.update_joins(joins, ff)
- data_source["tempContours"] = ff.astype("float64")
- #for i, new in enumerate(sorted(joins.keys())):
- # pbar.update(i)
- # old_set = joins[new]
- # for old in old_set:
- # if old == new: continue
- # i1 = (data_source["tempContours"] == old)
- # data_source["tempContours"][i1] = new
- t2 = time.time()
- print "Finished joining in %0.2e seconds" % (t2-t1)
- pbar.finish()
- data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
- del data_source.field_data["tempContours"] # Force a reload from the grids
- data_source.get_data("tempContours")
- contour_ind = {}
- i = 0
- for contour_id in np.unique(data_source["tempContours"]):
- if contour_id == -1: continue
- contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
- mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
- i += 1
- mylog.info("Identified %s contours between %0.5e and %0.5e",
- len(contour_ind.keys()),min_val,max_val)
- for grid in chain(grid_set):
- grid.field_data.pop("tempContours", None)
- del data_source.field_data["tempContours"]
- return contour_ind
+ rv = dict()
+ rv.update(contour_ids)
+ # NOTE: Because joins can appear in both a "final join" and a subsequent
+ # "join", we can't know for sure how many unique joins there are without
+ # checking if no cells match or doing an expensive operation checking for
+ # the unique set of final join values.
+ return final_joins.size, rv
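
The tail of identify_contours relabels in two steps: every provisional ID is mapped to its canonical root via the exported joins table, then the canonical IDs are compacted to 1..N via final_joins, which is what the new update_joins does cell by cell. A simplified NumPy rendition with invented IDs:

import numpy as np

contour_ids = np.array([[5, -1], [9, 5]], dtype="int64")
joins = np.array([[9, 5]], dtype="int64")   # provisional 9 -> canonical 5
final_joins = np.unique(joins[:, 1])

for old, new in joins:
    contour_ids[contour_ids == old] = new    # apply the join table
for n, cid in enumerate(final_joins):
    contour_ids[contour_ids == cid] = n + 1  # compact to 1..N; -1 stays
print(contour_ids)   # [[ 1 -1] [ 1  1]]
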
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -167,12 +167,12 @@
Parameters
----------
- axis : int
- The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
field : string
This is the field which will be "projected" along the axis. If
multiple are specified (in a list) they will all be projected in
the first pass.
+ axis : int
+ The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
weight_field : string
If supplied, the field being projected will be multiplied by this
weight value before being integrated, and at the conclusion of the
@@ -274,11 +274,12 @@
for chunk in self.data_source.chunks([], "io"):
self._initialize_chunk(chunk, tree)
# This needs to be parallel_objects-ified
- for chunk in parallel_objects(self.data_source.chunks(
- chunk_fields, "io")):
- mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
- get_memory_usage()/1024.)
- self._handle_chunk(chunk, fields, tree)
+ with self.data_source._field_parameter_state(self.field_parameters):
+ for chunk in parallel_objects(self.data_source.chunks(
+ chunk_fields, "io")):
+ mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+ get_memory_usage()/1024.)
+ self._handle_chunk(chunk, fields, tree)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
merge_style = -1
@@ -308,6 +309,7 @@
nvals *= convs[None,:]
# We now convert to half-widths and center-points
data = {}
+ #non_nan = ~np.any(np.isnan(nvals), axis=-1)
data['px'] = px
data['py'] = py
data['weight_field'] = nwvals
@@ -319,8 +321,9 @@
field_data = np.hsplit(data.pop('fields'), len(fields))
for fi, field in enumerate(fields):
mylog.debug("Setting field %s", field)
- self[field] = field_data[fi].ravel()
- for i in data.keys(): self[i] = data.pop(i)
+ self[field] = field_data[fi].ravel()#[non_nan]
+ for i in data.keys():
+ self[i] = data.pop(i)#[non_nan]
mylog.info("Projection completed")
def _initialize_chunk(self, chunk, tree):
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -33,6 +33,8 @@
ParallelAnalysisInterface
from yt.utilities.parameter_file_storage import \
ParameterFileStore
+from yt.utilities.amr_kdtree.api import \
+ AMRKDTree
from .derived_quantities import DerivedQuantityCollection
from .field_info_container import \
NeedsGridType, ValidateSpatial
@@ -365,6 +367,13 @@
return s
@contextmanager
+ def _field_parameter_state(self, field_parameters):
+ old_field_parameters = self.field_parameters
+ self.field_parameters = field_parameters
+ yield
+ self.field_parameters = old_field_parameters
+
+ @contextmanager
def _field_type_state(self, ftype, finfo, obj = None):
if obj is None: obj = self
old_particle_type = obj._current_particle_type
@@ -407,6 +416,14 @@
explicit_fields.append((ftype, fname))
return explicit_fields
+ _tree = None
+
+ @property
+ def tiles(self):
+ if self._tree is not None: return self._tree
+ self._tree = AMRKDTree(self.pf, data_source=self)
+ return self._tree
+
@property
def blocks(self):
for io_chunk in self.chunks([], "io"):
@@ -751,11 +768,13 @@
self._grids = None
self.quantities = DerivedQuantityCollection(self)
- def cut_region(self, field_cuts):
+ def cut_region(self, field_cuts, field_parameters = None):
"""
- Return an InLineExtractedRegion, where the grid cells are cut on the
- fly with a set of field_cuts. It is very useful for applying
- conditions to the fields in your data object.
+ Return a YTCutRegionBase, where the object cells are cut on the
+ fly with a set of field_cuts. It is very useful for applying
+ conditions to the fields in your data object. Note that in previous
+ versions of yt, this accepted 'grid' as a variable, but presently it
+ requires 'obj'.
Examples
--------
@@ -763,19 +782,12 @@
>>> pf = load("RedshiftOutput0005")
>>> ad = pf.h.all_data()
- >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+ >>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
>>> print cr.quantities["TotalQuantity"]("CellMassMsun")
-
"""
- return YTValueCutExtractionBase(self, field_cuts)
-
- def extract_region(self, indices):
- """
- Return an ExtractedRegion where the points contained in it are defined
- as the points in `this` data object with the given *indices*.
- """
- fp = self.field_parameters.copy()
- return YTSelectedIndicesBase(self, indices, field_parameters = fp)
+ cr = self.pf.h.cut_region(self, field_cuts,
+ field_parameters = field_parameters)
+ return cr
def extract_isocontours(self, field, value, filename = None,
rescale = False, sample_values = None):
@@ -966,12 +978,15 @@
ff, mask, grid.LeftEdge, grid.dds)
def extract_connected_sets(self, field, num_levels, min_val, max_val,
- log_space=True, cumulative=True, cache=False):
+ log_space=True, cumulative=True):
"""
This function will create a set of contour objects, defined
by having connected cell structures, which can then be
studied and used to 'paint' their source grids, thus enabling
them to be plotted.
+
+ Note that this function *can* return a connected set object that has no
+ member values.
"""
if log_space:
cons = np.logspace(np.log10(min_val),np.log10(max_val),
@@ -979,8 +994,6 @@
else:
cons = np.linspace(min_val, max_val, num_levels+1)
contours = {}
- if cache: cached_fields = defaultdict(lambda: dict())
- else: cached_fields = None
for level in range(num_levels):
contours[level] = {}
if cumulative:
@@ -988,10 +1001,11 @@
else:
mv = cons[level+1]
from yt.analysis_modules.level_sets.api import identify_contours
- cids = identify_contours(self, field, cons[level], mv,
- cached_fields)
- for cid, cid_ind in cids.items():
- contours[level][cid] = self.extract_region(cid_ind)
+ nj, cids = identify_contours(self, field, cons[level], mv)
+ for cid in range(nj):
+ contours[level][cid] = self.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
return cons, contours
def paint_grids(self, field, value, default_value=None):
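
_field_parameter_state above is a plain save/swap/restore context manager; the projection loop uses it so that a cut region's field parameters are in effect while chunks are read. The pattern in isolation (class invented; a try/finally around the yield would make the restore robust to exceptions):

from contextlib import contextmanager

class Holder(object):
    def __init__(self):
        self.field_parameters = {"center": (0.5, 0.5, 0.5)}

    @contextmanager
    def _field_parameter_state(self, field_parameters):
        old = self.field_parameters
        self.field_parameters = field_parameters
        yield
        self.field_parameters = old

h = Holder()
with h._field_parameter_state({"contour_slices": {}}):
    print(h.field_parameters)   # temporary parameters in effect
print(h.field_parameters)       # originals restored
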
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -583,3 +583,85 @@
self.set_field_parameter('e0', e0)
self.set_field_parameter('e1', e1)
self.set_field_parameter('e2', e2)
+
+class YTCutRegionBase(YTSelectionContainer3D):
+ """
+ This is a data object designed to allow individuals to apply logical
+ operations to fields or particles and filter as a result of those cuts.
+
+ Parameters
+ ----------
+ base_object : YTSelectionContainer3D
+ The object to which cuts will be applied.
+ conditionals : list of strings
+ A list of conditionals that will be evaluated. In the namespace
+ available, these conditionals will have access to 'obj' which is a data
+ object of unknown shape, and they must generate a boolean array. For
+ instance, conditionals = ["obj['temperature'] < 1e3"]
+
+ Examples
+ --------
+
+ >>> pf = load("DD0010/moving7_0010")
+ >>> sp = pf.h.sphere("max", (1.0, 'mpc'))
+ >>> cr = pf.h.cut_region(sp, ["obj['temperature'] < 1e3"])
+ """
+ _type_name = "cut_region"
+ _con_args = ("base_object", "conditionals")
+ def __init__(self, base_object, conditionals, pf = None,
+ field_parameters = None):
+ super(YTCutRegionBase, self).__init__(base_object.center, pf, field_parameters)
+ self.conditionals = ensure_list(conditionals)
+ self.base_object = base_object
+ self._selector = None
+ # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
+ # ires and get_data
+
+ @property
+ def selector(self):
+ raise NotImplementedError
+
+ def chunks(self, fields, chunking_style, **kwargs):
+ # We actually want to chunk the sub-chunk, not ourselves. We have no
+ # chunks to speak of, as we do not do any data IO of our own.
+ for chunk in self.hierarchy._chunk(self.base_object,
+ chunking_style,
+ **kwargs):
+ with self.base_object._chunked_read(chunk):
+ self.get_data(fields)
+ yield self
+
+ def get_data(self, fields = None):
+ fields = ensure_list(fields)
+ self.base_object.get_data(fields)
+ ind = self._cond_ind
+ for field in fields:
+ self.field_data[field] = self.base_object[field][ind]
+
+ @property
+ def _cond_ind(self):
+ ind = None
+ obj = self.base_object
+ with obj._field_parameter_state(self.field_parameters):
+ for cond in self.conditionals:
+ res = eval(cond)
+ if ind is None: ind = res
+ np.logical_and(res, ind, ind)
+ return ind
+
+ @property
+ def icoords(self):
+ return self.base_object.icoords[self._cond_ind,:]
+
+ @property
+ def fcoords(self):
+ return self.base_object.fcoords[self._cond_ind,:]
+
+ @property
+ def ires(self):
+ return self.base_object.ires[self._cond_ind]
+
+ @property
+ def fwidth(self):
+ return self.base_object.fwidth[self._cond_ind,:]
+
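The conditional handling in _cond_ind above boils down to eval-ing each
string against 'obj' and AND-ing the results in place. A self-contained
NumPy sketch of that combination step (the plain dict standing in for a
data object is an assumption, for illustration only):

    import numpy as np

    obj = {'temperature': np.array([500.0, 2.0e3, 800.0]),
           'density': np.array([1.0, 0.5, 2.0])}
    conditionals = ["obj['temperature'] < 1e3", "obj['density'] > 0.75"]

    ind = None
    for cond in conditionals:
        res = eval(cond)           # must produce a boolean array
        if ind is None:
            ind = res
        else:
            np.logical_and(res, ind, ind)  # accumulate the AND in place
    # ind is now array([ True, False,  True])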
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -6,15 +6,14 @@
def test_cut_region():
# We decompose in different ways
- return #TESTDISABLED
for nprocs in [1, 2, 4, 8]:
pf = fake_random_pf(64, nprocs = nprocs,
fields = ("Density", "Temperature", "x-velocity"))
# We'll test two objects
dd = pf.h.all_data()
- r = dd.cut_region( [ "grid['Temperature'] > 0.5",
- "grid['Density'] < 0.75",
- "grid['x-velocity'] > 0.25" ])
+ r = dd.cut_region( [ "obj['Temperature'] > 0.5",
+ "obj['Density'] < 0.75",
+ "obj['x-velocity'] > 0.25" ])
t = ( (dd["Temperature"] > 0.5 )
& (dd["Density"] < 0.75 )
& (dd["x-velocity"] > 0.25 ) )
@@ -23,33 +22,21 @@
yield assert_equal, np.all(r["x-velocity"] > 0.25), True
yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+ r2 = r.cut_region( [ "obj['Temperature'] < 0.75" ] )
t2 = (r["Temperature"] < 0.75)
yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
yield assert_equal, np.all(r2["Temperature"] < 0.75), True
-def test_extract_region():
- # We decompose in different ways
- return #TESTDISABLED
- for nprocs in [1, 2, 4, 8]:
- pf = fake_random_pf(64, nprocs = nprocs,
- fields = ("Density", "Temperature", "x-velocity"))
- # We'll test two objects
+ # Now we can test some projections
dd = pf.h.all_data()
- t = ( (dd["Temperature"] > 0.5 )
- & (dd["Density"] < 0.75 )
- & (dd["x-velocity"] > 0.25 ) )
- r = dd.extract_region(t)
- yield assert_equal, np.all(r["Temperature"] > 0.5), True
- yield assert_equal, np.all(r["Density"] < 0.75), True
- yield assert_equal, np.all(r["x-velocity"] > 0.25), True
- yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
- yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- t2 = (r["Temperature"] < 0.75)
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
- yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
- yield assert_equal, np.all(r2["Temperature"] < 0.75), True
- t3 = (r["Temperature"] < 0.75)
- r3 = r.extract_region( t3 )
- yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
- yield assert_equal, np.all(r3["Temperature"] < 0.75), True
+ cr = dd.cut_region(["obj['Ones'] > 0"])
+ for weight in [None, "Density"]:
+ p1 = pf.h.proj("Density", 0, data_source=dd, weight_field=weight)
+ p2 = pf.h.proj("Density", 0, data_source=cr, weight_field=weight)
+ for f in p1.field_data:
+ yield assert_almost_equal, p1[f], p2[f]
+ cr = dd.cut_region(["obj['Density'] > 0.25"])
+ p2 = pf.h.proj("Density", 2, data_source=cr)
+ yield assert_equal, p2["Density"].max() > 0.25, True
+ p2 = pf.h.proj("Density", 2, data_source=cr, weight_field = "Density")
+ yield assert_equal, p2["Density"].max() > 0.25, True
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -582,12 +582,18 @@
units=r"\rm{s}^{-1}")
def _Contours(field, data):
- return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
- display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
- validators=[ValidateSpatial(0), ValidateGridType()],
- take_log=False, display_field=False)
+ fd = data.get_field_parameter("contour_slices")
+ vals = data["Ones"] * -1
+ if fd is None or fd == 0.0:
+ return vals
+ for sl, v in fd.get(data.id, []):
+ vals[sl] = v
+ return vals
+add_field("Contours", validators=[ValidateSpatial(0)],
+ take_log=False,
+ display_field=False,
+ projection_conversion="1",
+ function=_Contours)
def obtain_velocities(data):
return obtain_rv_vec(data)
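The new _Contours field reads a 'contour_slices' field parameter: a mapping
from a data object's id to (slice, contour id) pairs, painted onto a
-1-filled array. A small standalone sketch of that painting step (the id 42
and the slices are invented):

    import numpy as np

    vals = np.ones((4, 4, 4)) * -1    # data["Ones"] * -1, as in _Contours
    contour_slices = {
        42: [((slice(0, 2), slice(0, 2), slice(0, 2)), 1),
             ((slice(2, 4), slice(2, 4), slice(2, 4)), 2)],
    }
    for sl, v in contour_slices.get(42, []):
        vals[sl] = v                  # paint each contour id into its slice
    # cells inside the two slices now carry ids 1 and 2; the rest stay -1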
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -688,7 +688,7 @@
if len(self.queue) == 0: raise StopIteration
chunk = YTDataChunk(None, "cache", self.queue, cache=False)
self.cache = self.geometry_handler.io._read_chunk_data(
- chunk, self.preload_fields)
+ chunk, self.preload_fields) or {}
g = self.queue.pop(0)
g._initialize_cache(self.cache.pop(g.id, {}))
return g
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -273,7 +273,9 @@
giter = sorted(gobjs, key = -g.Level)
elif sort is None:
giter = gobjs
- if self._preload_implemented and preload_fields is not None and ngz == 0:
+ if preload_fields is None: preload_fields = []
+ preload_fields, _ = self._split_fields(preload_fields)
+ if self._preload_implemented and len(preload_fields) > 0 and ngz == 0:
giter = ChunkDataCache(list(giter), preload_fields, self)
for i, og in enumerate(giter):
if ngz > 0:
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -184,6 +184,24 @@
for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
yield self.get_brick_data(node)
+ def slice_traverse(self, viewpoint = None):
+ if not hasattr(self.pf.h, "grid"):
+ raise NotImplementedError
+ for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+ grid = self.pf.h.grids[node.grid - self._id_offset]
+ dds = grid.dds
+ gle = grid.LeftEdge
+ nle = get_left_edge(node)
+ nre = get_right_edge(node)
+ li = np.rint((nle-gle)/dds).astype('int32')
+ ri = np.rint((nre-gle)/dds).astype('int32')
+ dims = (ri - li).astype('int32')
+ sl = (slice(li[0], ri[0]),
+ slice(li[1], ri[1]),
+ slice(li[2], ri[2]))
+ gi = grid.get_global_startindex() + li
+ yield grid, node, (sl, dims, gi)
+
def get_node(self, nodeid):
path = np.binary_repr(nodeid)
depth = 1
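slice_traverse maps each kd-node's bounding box onto integer cell indices of
its host grid. The same arithmetic in a standalone NumPy sketch (the edge
and cell-width values are invented):

    import numpy as np

    gle = np.array([0.0, 0.0, 0.0])   # grid LeftEdge
    dds = np.array([0.1, 0.1, 0.1])   # cell width
    nle = np.array([0.2, 0.0, 0.3])   # node left edge
    nre = np.array([0.5, 0.4, 0.6])   # node right edge

    li = np.rint((nle - gle) / dds).astype('int32')  # first covered cell
    ri = np.rint((nre - gle) / dds).astype('int32')  # one past the last
    dims = ri - li                                   # node size in cells
    sl = (slice(li[0], ri[0]), slice(li[1], ri[1]), slice(li[2], ri[2]))
    # sl == (slice(2, 5), slice(0, 4), slice(3, 6)); rint absorbs the
    # floating-point error in (0.3 - 0.0) / 0.1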
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -444,19 +444,28 @@
if new_result is None:
return
assert(len(new_result) == len(old_result))
+ nind, oind = None, None
for k in new_result:
assert (k in old_result)
+ if oind is None:
+ oind = np.array(np.isnan(old_result[k]))
+ np.logical_or(oind, np.isnan(old_result[k]), oind)
+ if nind is None:
+ nind = np.array(np.isnan(new_result[k]))
+ np.logical_or(nind, np.isnan(new_result[k]), nind)
+ oind = ~oind
+ nind = ~nind
for k in new_result:
err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
(k, self.field, self.weight_field, self.axis)
if k == 'weight_field' and self.weight_field is None:
continue
+ nres, ores = new_result[k][nind], old_result[k][oind]
if self.decimals is None:
- assert_equal(new_result[k], old_result[k],
- err_msg=err_msg)
+ assert_equal(nres, ores, err_msg=err_msg)
else:
- assert_allclose(new_result[k], old_result[k],
- 10.**-(self.decimals), err_msg=err_msg)
+ assert_allclose(nres, ores, 10.**-(self.decimals),
+ err_msg=err_msg)
class PixelizedProjectionValuesTest(AnswerTestingTest):
_type_name = "PixelizedProjectionValues"
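The comparison change above first ORs together the NaN locations across all
fields of a result set, then inverts that into an all-finite mask. A
standalone sketch of the same idea (arrays invented):

    import numpy as np

    old = {'a': np.array([1.0, np.nan, 3.0]),
           'b': np.array([np.nan, 2.0, 3.0])}

    oind = None
    for k in old:
        if oind is None:
            oind = np.array(np.isnan(old[k]))
        np.logical_or(oind, np.isnan(old[k]), oind)  # accumulate NaNs
    oind = ~oind                                     # keep all-finite entries
    # old['a'][oind] == array([ 3.]); index 2 is the only all-finite slot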
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,9 +18,17 @@
cimport cython
from libc.stdlib cimport malloc, free
+from amr_kdtools cimport _find_node, Node
+from grid_traversal cimport VolumeContainer, PartitionedGrid, \
+ vc_index, vc_pos_index
+
cdef extern from "math.h":
double fabs(double x)
+cdef extern from "stdlib.h":
+ # NOTE that size_t might not be int
+ void *alloca(int)
+
cdef inline np.int64_t i64max(np.int64_t i0, np.int64_t i1):
if i0 > i1: return i0
return i1
@@ -29,87 +37,407 @@
if i0 < i1: return i0
return i1
-cdef extern from "union_find.h":
- ctypedef struct forest_node:
- void *value
- forest_node *parent
- int rank
+cdef struct ContourID
- forest_node* MakeSet(void* value)
- void Union(forest_node* node1, forest_node* node2)
- forest_node* Find(forest_node* node)
+cdef struct ContourID:
+ np.int64_t contour_id
+ ContourID *parent
+ ContourID *next
+ ContourID *prev
-ctypedef struct CellIdentifier:
- np.int64_t hindex
- int level
+cdef ContourID *contour_create(np.int64_t contour_id,
+ ContourID *prev = NULL):
+ node = <ContourID *> malloc(sizeof(ContourID))
+ #print "Creating contour with id", contour_id
+ node.contour_id = contour_id
+ node.next = node.parent = NULL
+ node.prev = prev
+ if prev != NULL: prev.next = node
+ return node
-cdef class GridContourContainer:
- cdef np.int64_t dims[3]
- cdef np.int64_t start_indices[3]
- cdef forest_node **join_tree
- cdef np.int64_t ncells
+cdef void contour_delete(ContourID *node):
+ if node.prev != NULL: node.prev.next = node.next
+ if node.next != NULL: node.next.prev = node.prev
+ free(node)
- def __init__(self, dimensions, indices):
- cdef int i
- self.ncells = 1
- for i in range(3):
- self.ncells *= dimensions[i]
- self.dims[i] = dimensions[i]
- self.start_indices[i] = indices[i]
- self.join_tree = <forest_node **> malloc(sizeof(forest_node)
- * self.ncells)
- for i in range(self.ncells): self.join_tree[i] = NULL
+cdef ContourID *contour_find(ContourID *node):
+ cdef ContourID *temp, *root
+ root = node
+ while root.parent != NULL and root.parent != root:
+ root = root.parent
+ if root == root.parent: root.parent = NULL
+ while node.parent != NULL:
+ temp = node.parent
+ node.parent = root
+ node = temp
+ return root
+cdef void contour_union(ContourID *node1, ContourID *node2):
+ if node1.contour_id < node2.contour_id:
+ node2.parent = node1
+ elif node2.contour_id < node1.contour_id:
+ node1.parent = node2
+
+cdef struct CandidateContour
+
+cdef struct CandidateContour:
+ np.int64_t contour_id
+ np.int64_t join_id
+ CandidateContour *next
+
+cdef int candidate_contains(CandidateContour *first,
+ np.int64_t contour_id,
+ np.int64_t join_id = -1):
+ while first != NULL:
+ if first.contour_id == contour_id \
+ and first.join_id == join_id: return 1
+ first = first.next
+ return 0
+
+cdef CandidateContour *candidate_add(CandidateContour *first,
+ np.int64_t contour_id,
+ np.int64_t join_id = -1):
+ cdef CandidateContour *node
+ node = <CandidateContour *> malloc(sizeof(CandidateContour))
+ node.contour_id = contour_id
+ node.join_id = join_id
+ node.next = first
+ return node
+
+cdef class ContourTree:
+ # This class is essentially a Union-Find algorithm. What we want to do is
+ # to, given a connection between two objects, identify the unique ID for
+ # those two objects. So what we have is a collection of contours, and they
+ # eventually all get joined and contain lots of individual IDs. But it's
+ # easy to find the *first* contour, i.e., the primary ID, for each of the
+ # subsequent IDs.
+ #
+ # This means that we can connect id 202483 to id 2472, and if id 2472 is
+ # connected to id 143, the connection will *actually* be from 202483 to
+ # 143. In this way we can speed up joining things and knowing their
+ # "canonical" id.
+ #
+ # This is a multi-step process, since we first want to connect all of the
+ # contours, then we end up wanting to coalesce them, and ultimately we join
+ # them at the end. The join produces a table that maps the initial to the
+ # final, and we can go through and just update all of those.
+ cdef ContourID *first
+ cdef ContourID *last
+
+ def clear(self):
+ # Here, we wipe out ALL of our contours, but not the pointers to them
+ cdef ContourID *cur, *next
+ cur = self.first
+ while cur != NULL:
+ next = cur.next
+ free(cur)
+ cur = next
+ self.first = self.last = NULL
+
+ def __init__(self):
+ self.first = self.last = NULL
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
+ # This adds new contours, from the given contour IDs, to the tree.
+ # Each one can be connected to a parent, as well as to next/prev in the
+ # set of contours belonging to this tree.
+ cdef int i, n
+ n = contour_ids.shape[0]
+ cdef ContourID *cur = self.last
+ for i in range(n):
+ #print i, contour_ids[i]
+ cur = contour_create(contour_ids[i], cur)
+ if self.first == NULL: self.first = cur
+ self.last = cur
+
+ def add_contour(self, np.int64_t contour_id):
+ self.last = contour_create(contour_id, self.last)
+
+ def cull_candidates(self, np.ndarray[np.int64_t, ndim=3] candidates):
+ # This function looks at each preliminary contour ID belonging to a
+ # given collection of values, and then if need be it creates a new
+ # contour for it.
+ cdef int i, j, k, ni, nj, nk, nc
+ cdef CandidateContour *first = NULL
+ cdef CandidateContour *temp
+ cdef np.int64_t cid
+ nc = 0
+ ni = candidates.shape[0]
+ nj = candidates.shape[1]
+ nk = candidates.shape[2]
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ cid = candidates[i,j,k]
+ if cid == -1: continue
+ if candidate_contains(first, cid) == 0:
+ nc += 1
+ first = candidate_add(first, cid)
+ cdef np.ndarray[np.int64_t, ndim=1] contours
+ contours = np.empty(nc, dtype="int64")
+ i = 0
+ # This removes all the temporary contours for this set of contours and
+ # instead constructs a final list of them.
+ while first != NULL:
+ contours[i] = first.contour_id
+ i += 1
+ temp = first.next
+ free(first)
+ first = temp
+ return contours
+
+ def cull_joins(self, np.ndarray[np.int64_t, ndim=2] cjoins):
+ # This coalesces contour IDs, so that we have only the final name
+ # resolutions -- the .join_id from a candidate. So many items will map
+ # to a single join_id.
+ cdef int i, j, k, ni, nj, nk, nc
+ cdef CandidateContour *first = NULL
+ cdef CandidateContour *temp
+ cdef np.int64_t cid1, cid2
+ nc = 0
+ ni = cjoins.shape[0]
+ for i in range(ni):
+ cid1 = cjoins[i,0]
+ cid2 = cjoins[i,1]
+ if cid1 == -1: continue
+ if cid2 == -1: continue
+ if candidate_contains(first, cid1, cid2) == 0:
+ nc += 1
+ first = candidate_add(first, cid1, cid2)
+ cdef np.ndarray[np.int64_t, ndim=2] contours
+ contours = np.empty((nc,2), dtype="int64")
+ i = 0
+ while first != NULL:
+ contours[i,0] = first.contour_id
+ contours[i,1] = first.join_id
+ i += 1
+ temp = first.next
+ free(first)
+ first = temp
+ return contours
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
+ cdef int i, n, ins
+ cdef np.int64_t cid1, cid2
+ # Okay, this requires lots of iteration, unfortunately
+ cdef ContourID *cur, *root
+ n = join_tree.shape[0]
+ #print "Counting"
+ #print "Checking", self.count()
+ for i in range(n):
+ ins = 0
+ cid1 = join_tree[i, 0]
+ cid2 = join_tree[i, 1]
+ c1 = c2 = NULL
+ cur = self.first
+ #print "Looking for ", cid1, cid2
+ while c1 == NULL or c2 == NULL:
+ if cur.contour_id == cid1:
+ c1 = contour_find(cur)
+ if cur.contour_id == cid2:
+ c2 = contour_find(cur)
+ ins += 1
+ cur = cur.next
+ if cur == NULL: break
+ if c1 == NULL or c2 == NULL:
+ if c1 == NULL: print " Couldn't find ", cid1
+ if c2 == NULL: print " Couldn't find ", cid2
+ print " Inspected ", ins
+ raise RuntimeError
+ else:
+ contour_union(c1, c2)
+
+ def count(self):
+ cdef int n = 0
+ cdef ContourID *cur = self.first
+ while cur != NULL:
+ cur = cur.next
+ n += 1
+ return n
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def export(self):
+ cdef int n = self.count()
+ cdef ContourID *cur, *root
+ cur = self.first
+ cdef np.ndarray[np.int64_t, ndim=2] joins
+ joins = np.empty((n, 2), dtype="int64")
+ n = 0
+ while cur != NULL:
+ root = contour_find(cur)
+ joins[n, 0] = cur.contour_id
+ joins[n, 1] = root.contour_id
+ cur = cur.next
+ n += 1
+ return joins
+
def __dealloc__(self):
- cdef int i
- for i in range(self.ncells):
- if self.join_tree[i] != NULL: free(self.join_tree[i])
- free(self.join_tree)
+ self.clear()
- #def construct_join_tree(self,
- # np.ndarray[np.float64_t, ndim=3] field,
- # np.ndarray[np.bool_t, ndim=3] mask):
- # # This only looks at the components of the grid that are actually
- # # inside this grid -- boundary conditions are handled later.
- # pass
+cdef class TileContourTree:
+ cdef np.float64_t min_val
+ cdef np.float64_t max_val
-#@cython.boundscheck(False)
-#@cython.wraparound(False)
-def construct_boundary_relationships(
- np.ndarray[dtype=np.int64_t, ndim=3] contour_ids):
- # We only look at the boundary and one cell in
- cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj
+ def __init__(self, np.float64_t min_val, np.float64_t max_val):
+ self.min_val = min_val
+ self.max_val = max_val
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def identify_contours(self, np.ndarray[np.float64_t, ndim=3] values,
+ np.ndarray[np.int64_t, ndim=3] contour_ids,
+ np.int64_t start):
+ # This just looks at neighbor values and tries to identify which zones
+ # are touching by face within a given brick.
+ cdef int i, j, k, ni, nj, nk, offset
+ cdef int off_i, off_j, off_k, oi, ok, oj
+ cdef ContourID *cur = NULL
+ cdef ContourID *c1, *c2
+ cdef np.float64_t v
+ cdef np.int64_t nc
+ ni = values.shape[0]
+ nj = values.shape[1]
+ nk = values.shape[2]
+ nc = 0
+ cdef ContourID **container = <ContourID**> malloc(
+ sizeof(ContourID*)*ni*nj*nk)
+ for i in range(ni*nj*nk): container[i] = NULL
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ v = values[i,j,k]
+ if v < self.min_val or v > self.max_val: continue
+ nc += 1
+ c1 = contour_create(nc + start)
+ cur = container[i*nj*nk + j*nk + k] = c1
+ for oi in range(3):
+ off_i = oi - 1 + i
+ if not (0 <= off_i < ni): continue
+ for oj in range(3):
+ off_j = oj - 1 + j
+ if not (0 <= off_j < nj): continue
+ for ok in range(3):
+ if oi == oj == ok == 1: continue
+ off_k = ok - 1 + k
+ if off_k > k and off_j > j and off_i > i:
+ continue
+ if not (0 <= off_k < nk): continue
+ offset = off_i*nj*nk + off_j*nk + off_k
+ c2 = container[offset]
+ if c2 == NULL: continue
+ c2 = contour_find(c2)
+ contour_union(cur, c2)
+ cur = contour_find(cur)
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ c1 = container[i*nj*nk + j*nk + k]
+ if c1 == NULL: continue
+ cur = c1
+ c1 = contour_find(c1)
+ contour_ids[i,j,k] = c1.contour_id
+
+ for i in range(ni*nj*nk):
+ if container[i] != NULL: free(container[i])
+ free(container)
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+def link_node_contours(Node trunk, contours, ContourTree tree,
+ np.ndarray[np.int64_t, ndim=1] node_ids):
+ cdef int n_nodes = node_ids.shape[0]
+ cdef np.int64_t node_ind
+ cdef VolumeContainer **vcs = <VolumeContainer **> malloc(
+ sizeof(VolumeContainer*) * n_nodes)
+ cdef int i
+ cdef PartitionedGrid pg
+ for i in range(n_nodes):
+ pg = contours[node_ids[i]][2]
+ vcs[i] = pg.container
+ cdef np.ndarray[np.uint8_t] examined = np.zeros(n_nodes, "uint8")
+ for nid, cinfo in sorted(contours.items(), key = lambda a: -a[1][0]):
+ level, node_ind, pg, sl = cinfo
+ construct_boundary_relationships(trunk, tree, node_ind,
+ examined, vcs, node_ids)
+ examined[node_ind] = 1
+
+cdef inline void get_spos(VolumeContainer *vc, int i, int j, int k,
+ int axis, np.float64_t *spos):
+ spos[0] = vc.left_edge[0] + i * vc.dds[0]
+ spos[1] = vc.left_edge[1] + j * vc.dds[1]
+ spos[2] = vc.left_edge[2] + k * vc.dds[2]
+ spos[axis] += 0.5 * vc.dds[axis]
+
+cdef inline int spos_contained(VolumeContainer *vc, np.float64_t *spos):
+ cdef int i
+ for i in range(3):
+ if spos[i] < vc.left_edge[i] or spos[i] > vc.right_edge[i]: return 0
+ return 1
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+cdef void construct_boundary_relationships(Node trunk, ContourTree tree,
+ np.int64_t nid, np.ndarray[np.uint8_t, ndim=1] examined,
+ VolumeContainer **vcs,
+ np.ndarray[np.int64_t, ndim=1] node_ids):
+ # We only look at the boundary and find the nodes next to it.
+ # Contours is a dict, keyed by the node.id.
+ cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
cdef np.int64_t c1, c2
- nx = contour_ids.shape[0]
- ny = contour_ids.shape[1]
- nz = contour_ids.shape[2]
+ cdef Node adj_node
+ cdef VolumeContainer *vc1, *vc0 = vcs[nid]
+ nx = vc0.dims[0]
+ ny = vc0.dims[1]
+ nz = vc0.dims[2]
+ cdef int s = (ny*nx + nx*nz + ny*nz) * 18
# We allocate an array of fixed (maximum) size
- cdef int s = (ny*nx + nx*nz + ny*nz - 2) * 18
- cdef np.ndarray[np.int64_t, ndim=2] tree = np.zeros((s, 2), dtype="int64")
+ cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
cdef int ti = 0
- # First x-pass
+ cdef int index
+ cdef np.float64_t spos[3]
+
+ # First the x-pass
for i in range(ny):
for j in range(nz):
for offset_i in range(3):
oi = offset_i - 1
- if i == 0 and oi == -1: continue
- if i == ny - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == nz - 1 and oj == 1: continue
- c1 = contour_ids[0, i, j]
- c2 = contour_ids[1, i + oi, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[nx-1, i, j]
- c2 = contour_ids[nx-2, i + oi, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
+ # Adjust by -1 in x, then oi and oj in y and z
+ get_spos(vc0, -1, i + oi, j + oj, 0, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, 0, i, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ # This is outside our vc
+ get_spos(vc0, nx, i + oi, j + oj, 0, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, nx - 1, i, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
# Now y-pass
for i in range(nx):
for j in range(nz):
@@ -119,43 +447,75 @@
if i == nx - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == nz - 1 and oj == 1: continue
- c1 = contour_ids[i, 0, j]
- c2 = contour_ids[i + oi, 1, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[i, ny-1, j]
- c2 = contour_ids[i + oi, ny-2, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
+ get_spos(vc0, i + oi, -1, j + oj, 1, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, 0, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ get_spos(vc0, i + oi, ny, j + oj, 1, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, ny - 1, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ # Now z-pass
for i in range(nx):
for j in range(ny):
for offset_i in range(3):
oi = offset_i - 1
- if i == 0 and oi == -1: continue
- if i == nx - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == ny - 1 and oj == 1: continue
- c1 = contour_ids[i, j, 0]
- c2 = contour_ids[i + oi, j + oj, 1]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[i, j, nz-1]
- c2 = contour_ids[i + oi, j + oj, nz-2]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- return tree[:ti,:]
+ get_spos(vc0, i + oi, j + oj, -1, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, 0)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ get_spos(vc0, i + oi, j + oj, nz, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, nz - 1)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ if ti == 0: return
+ new_joins = tree.cull_joins(joins[:ti,:])
+ tree.add_joins(new_joins)
cdef inline int are_neighbors(
np.float64_t x1, np.float64_t y1, np.float64_t z1,
@@ -228,16 +588,23 @@
@cython.boundscheck(False)
@cython.wraparound(False)
-def update_joins(joins, np.ndarray[np.int64_t, ndim=1] contour_ids):
- cdef np.int64_t new, old, i, oi
- cdef int n, on
- cdef np.ndarray[np.int64_t, ndim=1] old_set
- #print contour_ids.shape[0]
- n = contour_ids.shape[0]
- for new, old_set in joins:
- #print new
- on = old_set.shape[0]
- for i in range(n):
- for oi in range(on):
- old = old_set[oi]
- if contour_ids[i] == old: contour_ids[i] = new
+def update_joins(np.ndarray[np.int64_t, ndim=2] joins,
+ np.ndarray[np.int64_t, ndim=3] contour_ids,
+ np.ndarray[np.int64_t, ndim=1] final_joins):
+ cdef np.int64_t new, old
+ cdef int i, j, nj, nf
+ cdef int ci, cj, ck
+ nj = joins.shape[0]
+ nf = final_joins.shape[0]
+ for ci in range(contour_ids.shape[0]):
+ for cj in range(contour_ids.shape[1]):
+ for ck in range(contour_ids.shape[2]):
+ if contour_ids[ci,cj,ck] == -1: continue
+ for j in range(nj):
+ if contour_ids[ci,cj,ck] == joins[j,0]:
+ contour_ids[ci,cj,ck] = joins[j,1]
+ break
+ for j in range(nf):
+ if contour_ids[ci,cj,ck] == final_joins[j]:
+ contour_ids[ci,cj,ck] = j + 1
+ break
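For readers unfamiliar with the structure the ContourTree comments describe,
a pure-Python union-find sketch: joins always resolve to the smallest
('canonical') id, with path compression during find, mirroring contour_find
and contour_union above (the ids come from the comment in the diff):

    parent = {}

    def find(cid):
        # walk to the root, then compress the path behind us
        root = cid
        while parent.get(root, root) != root:
            root = parent[root]
        while parent.get(cid, cid) != cid:
            parent[cid], cid = root, parent[cid]
        return root

    def union(c1, c2):
        r1, r2 = find(c1), find(c2)
        if r1 != r2:
            parent[max(r1, r2)] = min(r1, r2)  # smaller id wins

    union(202483, 2472)
    union(2472, 143)
    assert find(202483) == 143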
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/utilities/lib/amr_kdtools.pxd
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -0,0 +1,39 @@
+"""
+AMR kD-Tree Cython Tools
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+
+cdef struct Split:
+ int dim
+ np.float64_t pos
+
+cdef class Node
+
+cdef class Node:
+ cdef public Node left
+ cdef public Node right
+ cdef public Node parent
+ cdef public int grid
+ cdef public np.int64_t node_id
+ cdef public np.int64_t node_ind
+ cdef np.float64_t left_edge[3]
+ cdef np.float64_t right_edge[3]
+ cdef public data
+ cdef Split * split
+ cdef int level
+
+cdef int point_in_node(Node node, np.ndarray[np.float64_t, ndim=1] point)
+cdef Node _find_node(Node node, np.float64_t *point)
+cdef int _kd_is_leaf(Node node)
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -25,25 +25,11 @@
DEF Nch = 4
-cdef struct Split:
- int dim
- np.float64_t pos
-
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cdef class Node:
- cdef public Node left
- cdef public Node right
- cdef public Node parent
- cdef public int grid
- cdef public np.int64_t node_id
- cdef np.float64_t left_edge[3]
- cdef np.float64_t right_edge[3]
- cdef public data
- cdef Split * split
-
def __cinit__(self,
Node parent,
Node left,
@@ -152,11 +138,11 @@
def kd_traverse(Node trunk, viewpoint=None):
if viewpoint is None:
for node in depth_traverse(trunk):
- if kd_is_leaf(node) and node.grid != -1:
+ if _kd_is_leaf(node) == 1 and node.grid != -1:
yield node
else:
for node in viewpoint_traverse(trunk, viewpoint):
- if kd_is_leaf(node) and node.grid != -1:
+ if _kd_is_leaf(node) == 1 and node.grid != -1:
yield node
@cython.boundscheck(False)
@@ -172,7 +158,7 @@
if not should_i_build(node, rank, size):
return
- if kd_is_leaf(node):
+ if _kd_is_leaf(node) == 1:
insert_grid(node, gle, gre, gid, rank, size)
else:
less_id = gle[node.split.dim] < node.split.pos
@@ -295,7 +281,7 @@
if not should_i_build(node, rank, size):
return
- if kd_is_leaf(node):
+ if _kd_is_leaf(node) == 1:
insert_grids(node, ngrids, gles, gres, gids, rank, size)
return
@@ -766,11 +752,16 @@
assert has_l_child == has_r_child
return has_l_child
+cdef int _kd_is_leaf(Node node):
+ if node.left is None or node.right is None:
+ return 1
+ return 0
+
def step_depth(Node current, Node previous):
'''
Takes a single step in the depth-first traversal
'''
- if kd_is_leaf(current): # At a leaf, move back up
+ if _kd_is_leaf(current) == 1: # At a leaf, move back up
previous = current
current = current.parent
@@ -862,7 +853,7 @@
Takes a single step in the viewpoint based traversal. Always
goes to the node furthest away from viewpoint first.
'''
- if kd_is_leaf(current): # At a leaf, move back up
+ if _kd_is_leaf(current) == 1: # At a leaf, move back up
previous = current
current = current.parent
elif current.split.dim is None: # This is a dead node
@@ -913,6 +904,13 @@
inside *= node.right_edge[i] > point[i]
return inside
+cdef Node _find_node(Node node, np.float64_t *point):
+ while _kd_is_leaf(node) == 0:
+ if point[node.split.dim] < node.split.pos:
+ node = node.left
+ else:
+ node = node.right
+ return node
def find_node(Node node,
np.ndarray[np.float64_t, ndim=1] point):
@@ -920,12 +918,5 @@
Find the AMRKDTree node enclosing a position
"""
assert(point_in_node(node, point))
- while not kd_is_leaf(node):
- if point[node.split.dim] < node.split.pos:
- node = node.left
- else:
- node = node.right
- return node
+ return _find_node(node, <np.float64_t *> point.data)
-
-
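_find_node is an iterative kd-tree point location: descend by comparing the
point against each split plane until a leaf is reached. An equivalent sketch
with a throwaway Python node class (hypothetical, for illustration):

    class Node(object):
        def __init__(self, split=None, left=None, right=None):
            self.split = split      # (dim, pos) of the splitting plane
            self.left, self.right = left, right

    def find_node(node, point):
        # a node is a leaf when either child is missing
        while node.left is not None and node.right is not None:
            dim, pos = node.split
            node = node.left if point[dim] < pos else node.right
        return node

    leaf_a, leaf_b = Node(), Node()
    root = Node(split=(0, 0.5), left=leaf_a, right=leaf_b)
    assert find_node(root, (0.25, 0.0, 0.0)) is leaf_a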
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -17,6 +17,7 @@
import numpy as np
cimport numpy as np
cimport cython
+cimport kdtree_utils
cdef struct VolumeContainer:
int n_fields
@@ -29,6 +30,20 @@
np.float64_t idds[3]
int dims[3]
+cdef class PartitionedGrid:
+ cdef public object my_data
+ cdef public object source_mask
+ cdef public object LeftEdge
+ cdef public object RightEdge
+ cdef public int parent_grid_id
+ cdef VolumeContainer *container
+ cdef kdtree_utils.kdtree *star_list
+ cdef np.float64_t star_er
+ cdef np.float64_t star_sigma_num
+ cdef np.float64_t star_coeff
+ cdef void get_vector_field(self, np.float64_t pos[3],
+ np.float64_t *vel, np.float64_t *vel_mag)
+
ctypedef void sample_function(
VolumeContainer *vc,
np.float64_t v_pos[3],
@@ -45,3 +60,12 @@
void *data,
np.float64_t *return_t = *,
np.float64_t enter_t = *) nogil
+
+cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
+ return (i*vc.dims[1]+j)*vc.dims[2]+k
+
+cdef inline int vc_pos_index(VolumeContainer *vc, np.float64_t *spos):
+ cdef int i, index[3]
+ for i in range(3):
+ index[i] = <int> ((spos[i] - vc.left_edge[i]) * vc.idds[i])
+ return vc_index(vc, index[0], index[1], index[2])
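The two inline helpers amount to row-major flattening of (i, j, k) plus
quantizing a position by the inverse cell width. A NumPy check of the same
arithmetic (dimensions and spacing invented):

    import numpy as np

    dims = (4, 5, 6)
    left_edge = np.zeros(3)
    idds = np.array([10.0, 10.0, 10.0])   # 1 / cell-width

    def vc_index(i, j, k):
        return (i * dims[1] + j) * dims[2] + k

    def vc_pos_index(spos):
        idx = ((np.asarray(spos) - left_edge) * idds).astype(int)
        return vc_index(*idx)

    data = np.arange(np.prod(dims))
    assert data[vc_index(1, 2, 3)] == data.reshape(dims)[1, 2, 3]
    assert vc_pos_index((0.15, 0.25, 0.35)) == vc_index(1, 2, 3)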
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -16,7 +16,6 @@
import numpy as np
cimport numpy as np
cimport cython
-cimport kdtree_utils
#cimport healpix_interface
from libc.stdlib cimport malloc, free, abs
from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -58,16 +57,6 @@
void *data) nogil
cdef class PartitionedGrid:
- cdef public object my_data
- cdef public object source_mask
- cdef public object LeftEdge
- cdef public object RightEdge
- cdef public int parent_grid_id
- cdef VolumeContainer *container
- cdef kdtree_utils.kdtree *star_list
- cdef np.float64_t star_er
- cdef np.float64_t star_sigma_num
- cdef np.float64_t star_coeff
@cython.boundscheck(False)
@cython.wraparound(False)
diff -r 4a6cd04dadf28e013d7ad8d82f188d6ac7ac432f -r 761425f895bc86c2929c21327f7487df1dc68cd9 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -65,7 +65,9 @@
["yt/utilities/lib/ContourFinding.pyx",
"yt/utilities/lib/union_find.c"],
include_dirs=["yt/utilities/lib/"],
- libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+ libraries=["m"],
+ depends=["yt/utilities/lib/fp_utils.pxd",
+ "yt/utilities/lib/amr_kdtools.pxd"])
config.add_extension("DepthFirstOctree",
["yt/utilities/lib/DepthFirstOctree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
https://bitbucket.org/yt_analysis/yt/commits/831c3d55fb61/
Changeset: 831c3d55fb61
Branch: yt-3.0
User: MatthewTurk
Date: 2013-12-03 14:11:42
Summary: Merged in MatthewTurk/yt/yt-3.0 (pull request #656)
A few changes missed from the yt-3.0 pullin
Affected #: 22 files
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -14,7 +14,6 @@
#-----------------------------------------------------------------------------
from .contour_finder import \
- coalesce_join_tree, \
identify_contours
from .clump_handling import \
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -107,10 +107,11 @@
print "Wiping out existing children clumps."
self.children = []
if max_val is None: max_val = self.max_val
- contour_info = identify_contours(self.data, self.field, min_val, max_val,
- self.cached_fields)
- for cid in contour_info:
- new_clump = self.data.extract_region(contour_info[cid])
+ nj, cids = identify_contours(self.data, self.field, min_val, max_val)
+ for cid in range(nj):
+ new_clump = self.data.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
self.children.append(Clump(new_clump, self, self.field,
self.cached_fields,function=self.function,
clump_info=self.clump_info))
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -20,120 +20,52 @@
import yt.utilities.data_point_utilities as data_point_utilities
import yt.utilities.lib as amr_utils
-def coalesce_join_tree(jtree1):
- joins = defaultdict(set)
- nj = jtree1.shape[0]
- for i1 in range(nj):
- current_new = jtree1[i1, 0]
- current_old = jtree1[i1, 1]
- for i2 in range(nj):
- if jtree1[i2, 1] == current_new:
- current_new = max(current_new, jtree1[i2, 0])
- jtree1[i1, 0] = current_new
- for i1 in range(nj):
- joins[jtree1[i1, 0]].update([jtree1[i1, 1], jtree1[i1, 0]])
- updated = -1
- while updated != 0:
- keys = list(reversed(sorted(joins.keys())))
- updated = 0
- for k1 in keys + keys[::-1]:
- if k1 not in joins: continue
- s1 = joins[k1]
- for k2 in keys + keys[::-1]:
- if k2 >= k1: continue
- if k2 not in joins: continue
- s2 = joins[k2]
- if k2 in s1:
- s1.update(joins.pop(k2))
- updated += 1
- elif not s1.isdisjoint(s2):
- s1.update(joins.pop(k2))
- s1.update([k2])
- updated += 1
- tr = []
- for k in joins.keys():
- v = joins.pop(k)
- tr.append((k, np.array(list(v), dtype="int64")))
- return tr
-
def identify_contours(data_source, field, min_val, max_val,
cached_fields=None):
- cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
- pbar = get_pbar("First pass", len(data_source._grids))
- grids = sorted(data_source._grids, key=lambda g: -g.Level)
+ tree = amr_utils.ContourTree()
+ gct = amr_utils.TileContourTree(min_val, max_val)
total_contours = 0
- tree = []
- for gi,grid in enumerate(grids):
- pbar.update(gi+1)
- cm = data_source._get_cut_mask(grid)
- if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
- old_field_parameters = grid.field_parameters
- grid.field_parameters = data_source.field_parameters
- local_ind = np.where( (grid[field] > min_val)
- & (grid[field] < max_val) & cm )
- grid.field_parameters = old_field_parameters
- if local_ind[0].size == 0: continue
- kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
- grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
- grid["tempContours"][local_ind] = kk[:]
- cur_max_id -= local_ind[0].size
- xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
- cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
- fd_orig = grid["tempContours"].copy()
- xi = xi_u[cor_order]
- yi = yi_u[cor_order]
- zi = zi_u[cor_order]
- while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
- pass
- total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
- new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
- tree += zip(new_contours, new_contours)
- tree = set(tree)
+ contours = {}
+ empty_mask = np.ones((1,1,1), dtype="uint8")
+ node_ids = []
+ for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+ node.node_ind = len(node_ids)
+ nid = node.node_id
+ node_ids.append(nid)
+ values = g[field][sl].astype("float64")
+ contour_ids = np.zeros(dims, "int64") - 1
+ gct.identify_contours(values, contour_ids, total_contours)
+ new_contours = tree.cull_candidates(contour_ids)
+ total_contours += new_contours.shape[0]
+ tree.add_contours(new_contours)
+ # Now we can create a partitioned grid with the contours.
+ pg = amr_utils.PartitionedGrid(g.id,
+ [contour_ids.view("float64")],
+ empty_mask, g.dds * gi, g.dds * (gi + dims),
+ dims.astype("int64"))
+ contours[nid] = (g.Level, node.node_ind, pg, sl)
+ node_ids = np.array(node_ids)
+ trunk = data_source.tiles.tree.trunk
+ mylog.info("Linking node (%s) contours.", len(contours))
+ amr_utils.link_node_contours(trunk, contours, tree, node_ids)
+ mylog.info("Linked.")
+ #joins = tree.cull_joins(bt)
+ #tree.add_joins(joins)
+ joins = tree.export()
+ contour_ids = defaultdict(list)
+ pbar = get_pbar("Updating joins ... ", len(contours))
+ final_joins = np.unique(joins[:,1])
+ for i, nid in enumerate(sorted(contours)):
+ level, node_ind, pg, sl = contours[nid]
+ ff = pg.my_data[0].view("int64")
+ amr_utils.update_joins(joins, ff, final_joins)
+ contour_ids[pg.parent_grid_id].append((sl, ff))
+ pbar.update(i)
pbar.finish()
- pbar = get_pbar("Calculating joins ", len(data_source._grids))
- grid_set = set()
- for gi,grid in enumerate(grids):
- pbar.update(gi)
- cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
- grid_set.update(set(cg._grids))
- fd = cg["tempContours"].astype('int64')
- boundary_tree = amr_utils.construct_boundary_relationships(fd)
- tree.update(((a, b) for a, b in boundary_tree))
- pbar.finish()
- sort_new = np.array(list(tree), dtype='int64')
- mylog.info("Coalescing %s joins", sort_new.shape[0])
- joins = coalesce_join_tree(sort_new)
- #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
- pbar = get_pbar("Joining ", len(joins))
- # This process could and should be done faster
- print "Joining..."
- t1 = time.time()
- ff = data_source["tempContours"].astype("int64")
- amr_utils.update_joins(joins, ff)
- data_source["tempContours"] = ff.astype("float64")
- #for i, new in enumerate(sorted(joins.keys())):
- # pbar.update(i)
- # old_set = joins[new]
- # for old in old_set:
- # if old == new: continue
- # i1 = (data_source["tempContours"] == old)
- # data_source["tempContours"][i1] = new
- t2 = time.time()
- print "Finished joining in %0.2e seconds" % (t2-t1)
- pbar.finish()
- data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
- del data_source.field_data["tempContours"] # Force a reload from the grids
- data_source.get_data("tempContours")
- contour_ind = {}
- i = 0
- for contour_id in np.unique(data_source["tempContours"]):
- if contour_id == -1: continue
- contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
- mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
- i += 1
- mylog.info("Identified %s contours between %0.5e and %0.5e",
- len(contour_ind.keys()),min_val,max_val)
- for grid in chain(grid_set):
- grid.field_data.pop("tempContours", None)
- del data_source.field_data["tempContours"]
- return contour_ind
+ rv = dict()
+ rv.update(contour_ids)
+ # NOTE: Because joins can appear in both a "final join" and a subsequent
+ # "join", we can't know for sure how many unique joins there are without
+ # checking if no cells match or doing an expensive operation checking for
+ # the unique set of final join values.
+ return final_joins.size, rv
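identify_contours now hands back final_joins.size plus per-grid
(slice, array) pairs whose values have been renumbered. A vectorized NumPy
sketch of the two-step relabeling that update_joins performs cell by cell
(ids invented; the Cython version also breaks after the first match):

    import numpy as np

    contour_ids = np.array([[-1, 143], [2472, 143]], dtype='int64')
    joins = np.array([[2472, 143]], dtype='int64')    # canonical-id table
    final_joins = np.array([143], dtype='int64')      # unique surviving ids

    for j_old, j_new in joins:
        contour_ids[contour_ids == j_old] = j_new     # apply the join table
    for n, fid in enumerate(final_joins):
        contour_ids[contour_ids == fid] = n + 1       # compact to 1..N
    # contour_ids is now [[-1, 1], [1, 1]]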
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
if isinstance(arg, types.StringTypes):
if os.path.exists(arg):
valid_file.append(True)
+ elif arg.startswith("http"):
+ valid_file.append(True)
else:
if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
valid_file.append(True)
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -167,12 +167,12 @@
Parameters
----------
- axis : int
- The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
field : string
This is the field which will be "projected" along the axis. If
multiple are specified (in a list) they will all be projected in
the first pass.
+ axis : int
+ The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
weight_field : string
If supplied, the field being projected will be multiplied by this
weight value before being integrated, and at the conclusion of the
@@ -274,11 +274,12 @@
for chunk in self.data_source.chunks([], "io"):
self._initialize_chunk(chunk, tree)
# This needs to be parallel_objects-ified
- for chunk in parallel_objects(self.data_source.chunks(
- chunk_fields, "io")):
- mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
- get_memory_usage()/1024.)
- self._handle_chunk(chunk, fields, tree)
+ with self.data_source._field_parameter_state(self.field_parameters):
+ for chunk in parallel_objects(self.data_source.chunks(
+ chunk_fields, "io")):
+ mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+ get_memory_usage()/1024.)
+ self._handle_chunk(chunk, fields, tree)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
merge_style = -1
@@ -308,6 +309,7 @@
nvals *= convs[None,:]
# We now convert to half-widths and center-points
data = {}
+ #non_nan = ~np.any(np.isnan(nvals), axis=-1)
data['px'] = px
data['py'] = py
data['weight_field'] = nwvals
@@ -319,8 +321,9 @@
field_data = np.hsplit(data.pop('fields'), len(fields))
for fi, field in enumerate(fields):
mylog.debug("Setting field %s", field)
- self[field] = field_data[fi].ravel()
- for i in data.keys(): self[i] = data.pop(i)
+ self[field] = field_data[fi].ravel()#[non_nan]
+ for i in data.keys():
+ self[i] = data.pop(i)#[non_nan]
mylog.info("Projection completed")
def _initialize_chunk(self, chunk, tree):
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -33,6 +33,8 @@
ParallelAnalysisInterface
from yt.utilities.parameter_file_storage import \
ParameterFileStore
+from yt.utilities.amr_kdtree.api import \
+ AMRKDTree
from .derived_quantities import DerivedQuantityCollection
from .field_info_container import \
NeedsGridType, ValidateSpatial
@@ -365,6 +367,13 @@
return s
@contextmanager
+ def _field_parameter_state(self, field_parameters):
+ old_field_parameters = self.field_parameters
+ self.field_parameters = field_parameters
+ yield
+ self.field_parameters = old_field_parameters
+
+ @contextmanager
def _field_type_state(self, ftype, finfo, obj = None):
if obj is None: obj = self
old_particle_type = obj._current_particle_type
@@ -407,6 +416,14 @@
explicit_fields.append((ftype, fname))
return explicit_fields
+ _tree = None
+
+ @property
+ def tiles(self):
+ if self._tree is not None: return self._tree
+ self._tree = AMRKDTree(self.pf, data_source=self)
+ return self._tree
+
@property
def blocks(self):
for io_chunk in self.chunks([], "io"):
@@ -751,11 +768,13 @@
self._grids = None
self.quantities = DerivedQuantityCollection(self)
- def cut_region(self, field_cuts):
+ def cut_region(self, field_cuts, field_parameters = None):
"""
- Return an InLineExtractedRegion, where the grid cells are cut on the
- fly with a set of field_cuts. It is very useful for applying
- conditions to the fields in your data object.
+ Return a YTCutRegionBase, where the object cells are cut on the
+ fly with a set of field_cuts. It is very useful for applying
+ conditions to the fields in your data object. Note that in previous
+ versions of yt, this accepted 'grid' as a variable, but presently it
+ requires 'obj'.
Examples
--------
@@ -763,19 +782,12 @@
>>> pf = load("RedshiftOutput0005")
>>> ad = pf.h.all_data()
- >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+ >>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
>>> print cr.quantities["TotalQuantity"]("CellMassMsun")
-
"""
- return YTValueCutExtractionBase(self, field_cuts)
-
- def extract_region(self, indices):
- """
- Return an ExtractedRegion where the points contained in it are defined
- as the points in `this` data object with the given *indices*.
- """
- fp = self.field_parameters.copy()
- return YTSelectedIndicesBase(self, indices, field_parameters = fp)
+ cr = self.pf.h.cut_region(self, field_cuts,
+ field_parameters = field_parameters)
+ return cr
def extract_isocontours(self, field, value, filename = None,
rescale = False, sample_values = None):
@@ -966,12 +978,15 @@
ff, mask, grid.LeftEdge, grid.dds)
def extract_connected_sets(self, field, num_levels, min_val, max_val,
- log_space=True, cumulative=True, cache=False):
+ log_space=True, cumulative=True):
"""
This function will create a set of contour objects, defined
by having connected cell structures, which can then be
studied and used to 'paint' their source grids, thus enabling
them to be plotted.
+
+ Note that this function *can* return a connected set object that has no
+ member values.
"""
if log_space:
cons = np.logspace(np.log10(min_val),np.log10(max_val),
@@ -979,8 +994,6 @@
else:
cons = np.linspace(min_val, max_val, num_levels+1)
contours = {}
- if cache: cached_fields = defaultdict(lambda: dict())
- else: cached_fields = None
for level in range(num_levels):
contours[level] = {}
if cumulative:
@@ -988,10 +1001,11 @@
else:
mv = cons[level+1]
from yt.analysis_modules.level_sets.api import identify_contours
- cids = identify_contours(self, field, cons[level], mv,
- cached_fields)
- for cid, cid_ind in cids.items():
- contours[level][cid] = self.extract_region(cid_ind)
+ nj, cids = identify_contours(self, field, cons[level], mv)
+ for cid in range(nj):
+ contours[level][cid] = self.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
return cons, contours
def paint_grids(self, field, value, default_value=None):
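The _field_parameter_state context manager used by the projection loop above
temporarily swaps an object's field parameters. A minimal standalone sketch
of the pattern (class and values invented; like the diff, it restores only
on normal exit, with no try/finally):

    from contextlib import contextmanager

    class Obj(object):
        field_parameters = {'center': (0.0, 0.0, 0.0)}

        @contextmanager
        def _field_parameter_state(self, field_parameters):
            old = self.field_parameters
            self.field_parameters = field_parameters
            yield
            self.field_parameters = old   # restored on normal exit

    obj = Obj()
    with obj._field_parameter_state({'center': (0.5, 0.5, 0.5)}):
        assert obj.field_parameters['center'] == (0.5, 0.5, 0.5)
    assert obj.field_parameters['center'] == (0.0, 0.0, 0.0)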
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -583,3 +583,85 @@
self.set_field_parameter('e0', e0)
self.set_field_parameter('e1', e1)
self.set_field_parameter('e2', e2)
+
+class YTCutRegionBase(YTSelectionContainer3D):
+ """
+ This is a data object designed to allow individuals to apply logical
+ operations to fields or particles and filter as a result of those cuts.
+
+ Parameters
+ ----------
+ base_object : YTSelectionContainer3D
+ The object to which cuts will be applied.
+ conditionals : list of strings
+ A list of conditionals that will be evaluated. In the namespace
+ available, these conditionals will have access to 'obj' which is a data
+ object of unknown shape, and they must generate a boolean array. For
+ instance, conditionals = ["obj['temperature'] < 1e3"]
+
+ Examples
+ --------
+
+ >>> pf = load("DD0010/moving7_0010")
+ >>> sp = pf.h.sphere("max", (1.0, 'mpc'))
+ >>> cr = pf.h.cut_region(sp, ["obj['temperature'] < 1e3"])
+ """
+ _type_name = "cut_region"
+ _con_args = ("base_object", "conditionals")
+ def __init__(self, base_object, conditionals, pf = None,
+ field_parameters = None):
+ super(YTCutRegionBase, self).__init__(base_object.center, pf, field_parameters)
+ self.conditionals = ensure_list(conditionals)
+ self.base_object = base_object
+ self._selector = None
+ # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
+ # ires and get_data
+
+ @property
+ def selector(self):
+ raise NotImplementedError
+
+ def chunks(self, fields, chunking_style, **kwargs):
+ # We actually want to chunk the sub-chunk, not ourselves. We have no
+ # chunks to speak of, as we do not do data IO.
+ for chunk in self.hierarchy._chunk(self.base_object,
+ chunking_style,
+ **kwargs):
+ with self.base_object._chunked_read(chunk):
+ self.get_data(fields)
+ yield self
+
+ def get_data(self, fields = None):
+ fields = ensure_list(fields)
+ self.base_object.get_data(fields)
+ ind = self._cond_ind
+ for field in fields:
+ self.field_data[field] = self.base_object[field][ind]
+
+ @property
+ def _cond_ind(self):
+ ind = None
+ obj = self.base_object
+ with obj._field_parameter_state(self.field_parameters):
+ for cond in self.conditionals:
+ res = eval(cond)
+ if ind is None: ind = res
+ np.logical_and(res, ind, ind)
+ return ind
+
+ @property
+ def icoords(self):
+ return self.base_object.icoords[self._cond_ind,:]
+
+ @property
+ def fcoords(self):
+ return self.base_object.fcoords[self._cond_ind,:]
+
+ @property
+ def ires(self):
+ return self.base_object.ires[self._cond_ind]
+
+ @property
+ def fwidth(self):
+ return self.base_object.fwidth[self._cond_ind,:]
+
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
- if not os.path.exists(apath): raise IOError(filename)
+ #if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
if obj._skip_cache is False:
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -6,15 +6,14 @@
def test_cut_region():
# We decompose in different ways
- return #TESTDISABLED
for nprocs in [1, 2, 4, 8]:
pf = fake_random_pf(64, nprocs = nprocs,
fields = ("Density", "Temperature", "x-velocity"))
# We'll test two objects
dd = pf.h.all_data()
- r = dd.cut_region( [ "grid['Temperature'] > 0.5",
- "grid['Density'] < 0.75",
- "grid['x-velocity'] > 0.25" ])
+ r = dd.cut_region( [ "obj['Temperature'] > 0.5",
+ "obj['Density'] < 0.75",
+ "obj['x-velocity'] > 0.25" ])
t = ( (dd["Temperature"] > 0.5 )
& (dd["Density"] < 0.75 )
& (dd["x-velocity"] > 0.25 ) )
@@ -23,33 +22,21 @@
yield assert_equal, np.all(r["x-velocity"] > 0.25), True
yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+ r2 = r.cut_region( [ "obj['Temperature'] < 0.75" ] )
t2 = (r["Temperature"] < 0.75)
yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
yield assert_equal, np.all(r2["Temperature"] < 0.75), True
-def test_extract_region():
- # We decompose in different ways
- return #TESTDISABLED
- for nprocs in [1, 2, 4, 8]:
- pf = fake_random_pf(64, nprocs = nprocs,
- fields = ("Density", "Temperature", "x-velocity"))
- # We'll test two objects
+ # Now we can test some projections
dd = pf.h.all_data()
- t = ( (dd["Temperature"] > 0.5 )
- & (dd["Density"] < 0.75 )
- & (dd["x-velocity"] > 0.25 ) )
- r = dd.extract_region(t)
- yield assert_equal, np.all(r["Temperature"] > 0.5), True
- yield assert_equal, np.all(r["Density"] < 0.75), True
- yield assert_equal, np.all(r["x-velocity"] > 0.25), True
- yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
- yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- t2 = (r["Temperature"] < 0.75)
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
- yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
- yield assert_equal, np.all(r2["Temperature"] < 0.75), True
- t3 = (r["Temperature"] < 0.75)
- r3 = r.extract_region( t3 )
- yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
- yield assert_equal, np.all(r3["Temperature"] < 0.75), True
+ cr = dd.cut_region(["obj['Ones'] > 0"])
+ for weight in [None, "Density"]:
+ p1 = pf.h.proj("Density", 0, data_source=dd, weight_field=weight)
+ p2 = pf.h.proj("Density", 0, data_source=cr, weight_field=weight)
+ for f in p1.field_data:
+ yield assert_almost_equal, p1[f], p2[f]
+ cr = dd.cut_region(["obj['Density'] > 0.25"])
+ p2 = pf.h.proj("Density", 2, data_source=cr)
+ yield assert_equal, p2["Density"].max() > 0.25, True
+ p2 = pf.h.proj("Density", 2, data_source=cr, weight_field = "Density")
+ yield assert_equal, p2["Density"].max() > 0.25, True
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -582,12 +582,18 @@
units=r"\rm{s}^{-1}")
def _Contours(field, data):
- return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
- display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
- validators=[ValidateSpatial(0), ValidateGridType()],
- take_log=False, display_field=False)
+ fd = data.get_field_parameter("contour_slices")
+ vals = data["Ones"] * -1
+ if fd is None or fd == 0.0:
+ return vals
+ for sl, v in fd.get(data.id, []):
+ vals[sl] = v
+ return vals
+add_field("Contours", validators=[ValidateSpatial(0)],
+ take_log=False,
+ display_field=False,
+ projection_conversion="1",
+ function=_Contours)
def obtain_velocities(data):
return obtain_rv_vec(data)
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -20,6 +20,7 @@
import weakref
import struct
import glob
+import time
import os
from yt.utilities.fortran_utils import read_record
@@ -50,6 +51,11 @@
particle_deposition_functions, \
standard_particle_fields
+try:
+ import requests
+ import json
+except ImportError:
+ requests = None
class ParticleFile(object):
def __init__(self, pf, io, filename, file_id):
@@ -562,3 +568,79 @@
def _is_valid(self, *args, **kwargs):
# We do not allow load() of these files.
return False
+
+class HTTPParticleFile(ParticleFile):
+ pass
+
+class HTTPStreamStaticOutput(ParticleStaticOutput):
+ _hierarchy_class = ParticleGeometryHandler
+ _file_class = HTTPParticleFile
+ _fieldinfo_fallback = GadgetFieldInfo
+ _fieldinfo_known = KnownGadgetFields
+ _particle_mass_name = "Mass"
+ _particle_coordinates_name = "Coordinates"
+ _particle_velocity_name = "Velocities"
+ filename_template = ""
+
+ def __init__(self, base_url,
+ data_style = "http_particle_stream",
+ n_ref = 64, over_refine_factor=1):
+ if requests is None:
+ raise RuntimeError
+ self.base_url = base_url
+ self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
+ super(HTTPStreamStaticOutput, self).__init__("", data_style)
+
+ def __repr__(self):
+ return self.base_url
+
+ def _parse_parameter_file(self):
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.parameters["HydroMethod"] = "sph"
+
+ # Here's where we're going to grab the JSON index file
+ hreq = requests.get(self.base_url + "/yt_index.json")
+ if hreq.status_code != 200:
+ raise RuntimeError
+ header = json.loads(hreq.content)
+ header['particle_count'] = dict((int(k), header['particle_count'][k])
+ for k in header['particle_count'])
+ self.parameters = header
+
+ # Now we get what we need
+ self.domain_left_edge = np.array(header['domain_left_edge'], "float64")
+ self.domain_right_edge = np.array(header['domain_right_edge'], "float64")
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
+ self.periodicity = (True, True, True)
+
+ self.current_time = header['current_time']
+ self.unique_identifier = header.get("unique_identifier", time.time())
+ self.cosmological_simulation = int(header['cosmological_simulation'])
+ for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
+ 'hubble_constant'):
+ setattr(self, attr, float(header[attr]))
+
+ self.file_count = header['num_files']
+
+ def _set_units(self):
+ length_unit = float(self.parameters['units']['length'])
+ time_unit = float(self.parameters['units']['time'])
+ mass_unit = float(self.parameters['units']['mass'])
+ density_unit = mass_unit / length_unit ** 3
+ velocity_unit = length_unit / time_unit
+ self._unit_base = {}
+ self._unit_base['cm'] = 1.0/length_unit
+ self._unit_base['s'] = 1.0/time_unit
+ super(HTTPStreamStaticOutput, self)._set_units()
+ self.conversion_factors["velocity"] = velocity_unit
+ self.conversion_factors["mass"] = mass_unit
+ self.conversion_factors["density"] = density_unit
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ if args[0].startswith("http://"):
+ return True
+ return False
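For orientation, a sketch of the smallest yt_index.json that _parse_parameter_file and _set_units above appear to need, plus the corresponding load call; the URL and every value are placeholders:

    # Placeholder URL; all JSON values below are invented.
    # Minimal /yt_index.json the parser above appears to need:
    #   {"domain_left_edge": [0.0, 0.0, 0.0],
    #    "domain_right_edge": [1.0, 1.0, 1.0],
    #    "current_time": 0.0, "cosmological_simulation": 0,
    #    "current_redshift": 0.0, "omega_lambda": 0.7,
    #    "omega_matter": 0.3, "hubble_constant": 0.7,
    #    "num_files": 1,
    #    "units": {"length": 3.086e24, "time": 3.156e16, "mass": 1.989e43},
    #    "particle_count": {"0": {"PartType0": 1000}},
    #    "field_list": [["PartType0", "Coordinates"]]}
    from yt.frontends.sph.data_structures import HTTPStreamStaticOutput

    pf = HTTPStreamStaticOutput("http://localhost:8080/sim_run_042")
    print pf.domain_left_edge, pf.domain_right_edge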
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -28,6 +28,11 @@
from yt.geometry.oct_container import _ORDER_MAX
+try:
+ import requests
+except ImportError:
+ requests = None
+
CHUNKSIZE = 10000000
def _get_h5_handle(fn):
@@ -543,3 +548,90 @@
size = self._pdtypes[ptype].itemsize
pos += data_file.total_particles[ptype] * size
return field_offsets
+
+class IOHandlerHTTPStream(BaseIOHandler):
+ _data_style = "http_particle_stream"
+ _vector_fields = ("Coordinates", "Velocity", "Velocities")
+
+ def __init__(self, pf):
+ if requests is None:
+ raise RuntimeError
+ self._url = pf.base_url
+ # This should eventually manage the IO and cache it
+ self.total_bytes = 0
+ super(IOHandlerHTTPStream, self).__init__(pf)
+
+ def _open_stream(self, data_file, field):
+ # This does not actually stream yet!
+ ftype, fname = field
+ s = "%s/%s/%s/%s" % (self._url,
+ data_file.file_id, ftype, fname)
+ mylog.info("Loading URL %s", s)
+ resp = requests.get(s)
+ if resp.status_code != 200:
+ raise RuntimeError
+ self.total_bytes += len(resp.content)
+ return resp.content
+
+ def _identify_fields(self, data_file):
+ f = []
+ for ftype, fname in self.pf.parameters["field_list"]:
+ f.append((str(ftype), str(fname)))
+ return f
+
+ def _read_particle_coords(self, chunks, ptf):
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype in ptf:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0] // 3, 3)
+ yield ptype, (c[:,0], c[:,1], c[:,2])
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype, field_list in sorted(ptf.items()):
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0] // 3, 3)
+ mask = selector.select_points(
+ c[:,0], c[:,1], c[:,2])
+ del c
+ if mask is None: continue
+ for field in field_list:
+ s = self._open_stream(data_file, (ptype, field))
+ c = np.frombuffer(s, dtype="float64")
+ if field in self._vector_fields:
+ c.shape = (c.shape[0] // 3, 3)
+ data = c[mask, ...]
+ yield (ptype, field), data
+
+ def _initialize_index(self, data_file, regions):
+ header = self.pf.parameters
+ ptypes = header["particle_count"][data_file.file_id].keys()
+ pcount = sum(header["particle_count"][data_file.file_id].values())
+ morton = np.empty(pcount, dtype='uint64')
+ ind = 0
+ for ptype in ptypes:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0] // 3, 3)
+ regions.add_data_file(c, data_file.file_id)
+ morton[ind:ind+c.shape[0]] = compute_morton(
+ c[:,0], c[:,1], c[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ ind += c.shape[0]
+ return morton
+
+ def _count_particles(self, data_file):
+ return self.pf.parameters["particle_count"][data_file.file_id]
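The handler above implies a simple wire format: each <base_url>/<file_id>/<ptype>/<field> endpoint returns the raw float64 bytes of one array, with vector fields flattened to N*3 values. A client-side sketch mirroring _open_stream, with a placeholder URL:

    import numpy as np
    import requests

    base_url = "http://localhost:8080/sim_run_042"  # placeholder
    resp = requests.get("%s/%s/%s/%s" % (base_url, 0, "PartType0", "Coordinates"))
    if resp.status_code != 200:
        raise RuntimeError
    coords = np.frombuffer(resp.content, dtype="float64")
    coords = coords.reshape((coords.shape[0] // 3, 3))  # one row per particle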
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -688,7 +688,7 @@
if len(self.queue) == 0: raise StopIteration
chunk = YTDataChunk(None, "cache", self.queue, cache=False)
self.cache = self.geometry_handler.io._read_chunk_data(
- chunk, self.preload_fields)
+ chunk, self.preload_fields) or {}
g = self.queue.pop(0)
g._initialize_cache(self.cache.pop(g.id, {}))
return g
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -273,7 +273,9 @@
giter = sorted(gobjs, key = lambda g: -g.Level)
elif sort is None:
giter = gobjs
- if self._preload_implemented and preload_fields is not None and ngz == 0:
+ if preload_fields is None: preload_fields = []
+ preload_fields, _ = self._split_fields(preload_fields)
+ if self._preload_implemented and len(preload_fields) > 0 and ngz == 0:
giter = ChunkDataCache(list(giter), preload_fields, self)
for i, og in enumerate(giter):
if ngz > 0:
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -184,6 +184,24 @@
for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
yield self.get_brick_data(node)
+ def slice_traverse(self, viewpoint = None):
+ if not hasattr(self.pf.h, "grid"):
+ raise NotImplementedError
+ for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+ grid = self.pf.h.grids[node.grid - self._id_offset]
+ dds = grid.dds
+ gle = grid.LeftEdge
+ nle = get_left_edge(node)
+ nre = get_right_edge(node)
+ li = np.rint((nle-gle)/dds).astype('int32')
+ ri = np.rint((nre-gle)/dds).astype('int32')
+ dims = (ri - li).astype('int32')
+ sl = (slice(li[0], ri[0]),
+ slice(li[1], ri[1]),
+ slice(li[2], ri[2]))
+ gi = grid.get_global_startindex() + li
+ yield grid, node, (sl, dims, gi)
+
def get_node(self, nodeid):
path = np.binary_repr(nodeid)
depth = 1
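slice_traverse above decomposes each kd-tree leaf into a slice of its host grid (it raises NotImplementedError for non-grid frontends). A consumption sketch, assuming a grid-based pf and the usual AMRKDTree construction:

    # Sketch: map kd-tree leaves back onto slices of their host grids.
    from yt.utilities.amr_kdtree.amr_kdtree import AMRKDTree

    kd = AMRKDTree(pf)
    for grid, node, (sl, dims, gi) in kd.slice_traverse():
        # sl selects the node's cells within grid; gi is the slice's
        # global start index at that grid's level.
        density = grid["Density"][sl]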
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -444,19 +444,28 @@
if new_result is None:
return
assert(len(new_result) == len(old_result))
+ nind, oind = None, None
for k in new_result:
assert (k in old_result)
+ if oind is None:
+ oind = np.array(np.isnan(old_result[k]))
+ np.logical_or(oind, np.isnan(old_result[k]), oind)
+ if nind is None:
+ nind = np.array(np.isnan(new_result[k]))
+ np.logical_or(nind, np.isnan(new_result[k]), nind)
+ oind = ~oind
+ nind = ~nind
for k in new_result:
err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
(k, self.field, self.weight_field, self.axis)
if k == 'weight_field' and self.weight_field is None:
continue
+ nres, ores = new_result[k][nind], old_result[k][oind]
if self.decimals is None:
- assert_equal(new_result[k], old_result[k],
- err_msg=err_msg)
+ assert_equal(nres, ores, err_msg=err_msg)
else:
- assert_allclose(new_result[k], old_result[k],
- 10.**-(self.decimals), err_msg=err_msg)
+ assert_allclose(nres, ores, 10.**-(self.decimals),
+ err_msg=err_msg)
class PixelizedProjectionValuesTest(AnswerTestingTest):
_type_name = "PixelizedProjectionValues"
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,9 +18,17 @@
cimport cython
from libc.stdlib cimport malloc, free
+from amr_kdtools cimport _find_node, Node
+from grid_traversal cimport VolumeContainer, PartitionedGrid, \
+ vc_index, vc_pos_index
+
cdef extern from "math.h":
double fabs(double x)
+cdef extern from "stdlib.h":
+ # NOTE that size_t might not be int
+ void *alloca(int)
+
cdef inline np.int64_t i64max(np.int64_t i0, np.int64_t i1):
if i0 > i1: return i0
return i1
@@ -29,87 +37,407 @@
if i0 < i1: return i0
return i1
-cdef extern from "union_find.h":
- ctypedef struct forest_node:
- void *value
- forest_node *parent
- int rank
+cdef struct ContourID
- forest_node* MakeSet(void* value)
- void Union(forest_node* node1, forest_node* node2)
- forest_node* Find(forest_node* node)
+cdef struct ContourID:
+ np.int64_t contour_id
+ ContourID *parent
+ ContourID *next
+ ContourID *prev
-ctypedef struct CellIdentifier:
- np.int64_t hindex
- int level
+cdef ContourID *contour_create(np.int64_t contour_id,
+ ContourID *prev = NULL):
+ node = <ContourID *> malloc(sizeof(ContourID))
+ #print "Creating contour with id", contour_id
+ node.contour_id = contour_id
+ node.next = node.parent = NULL
+ node.prev = prev
+ if prev != NULL: prev.next = node
+ return node
-cdef class GridContourContainer:
- cdef np.int64_t dims[3]
- cdef np.int64_t start_indices[3]
- cdef forest_node **join_tree
- cdef np.int64_t ncells
+cdef void contour_delete(ContourID *node):
+ if node.prev != NULL: node.prev.next = node.next
+ if node.next != NULL: node.next.prev = node.prev
+ free(node)
- def __init__(self, dimensions, indices):
- cdef int i
- self.ncells = 1
- for i in range(3):
- self.ncells *= dimensions[i]
- self.dims[i] = dimensions[i]
- self.start_indices[i] = indices[i]
- self.join_tree = <forest_node **> malloc(sizeof(forest_node)
- * self.ncells)
- for i in range(self.ncells): self.join_tree[i] = NULL
+cdef ContourID *contour_find(ContourID *node):
+ cdef ContourID *temp, *root
+ root = node
+ while root.parent != NULL and root.parent != root:
+ root = root.parent
+ if root == root.parent: root.parent = NULL
+ while node.parent != NULL:
+ temp = node.parent
+ node.parent = root
+ node = temp
+ return root
+cdef void contour_union(ContourID *node1, ContourID *node2):
+ if node1.contour_id < node2.contour_id:
+ node2.parent = node1
+ elif node2.contour_id < node1.contour_id:
+ node1.parent = node2
+
+cdef struct CandidateContour
+
+cdef struct CandidateContour:
+ np.int64_t contour_id
+ np.int64_t join_id
+ CandidateContour *next
+
+cdef int candidate_contains(CandidateContour *first,
+ np.int64_t contour_id,
+ np.int64_t join_id = -1):
+ while first != NULL:
+ if first.contour_id == contour_id \
+ and first.join_id == join_id: return 1
+ first = first.next
+ return 0
+
+cdef CandidateContour *candidate_add(CandidateContour *first,
+ np.int64_t contour_id,
+ np.int64_t join_id = -1):
+ cdef CandidateContour *node
+ node = <CandidateContour *> malloc(sizeof(CandidateContour))
+ node.contour_id = contour_id
+ node.join_id = join_id
+ node.next = first
+ return node
+
+cdef class ContourTree:
+ # This class is essentially a union-find algorithm. Given a connection
+ # between two objects, we want to identify the unique ID for those two
+ # objects. So what we have is a collection of contours, and they
+ # eventually all get joined and contain lots of individual IDs. But it's
+ # easy to find the *first* contour, i.e., the primary ID, for each of the
+ # subsequent IDs.
+ #
+ # This means that we can connect id 202483 to id 2472, and if id 2472 is
+ # connected to id 143, the connection will *actually* be from 202483 to
+ # 143. In this way we can speed up joining things and knowing their
+ # "canonical" id.
+ #
+ # This is a multi-step process, since we first want to connect all of the
+ # contours, then we end up wanting to coalesce them, and ultimately we join
+ # them at the end. The join produces a table that maps the initial to the
+ # final, and we can go through and just update all of those.
+ cdef ContourID *first
+ cdef ContourID *last
+
+ def clear(self):
+ # Here, we wipe out ALL of our contours, but not the pointers to them
+ cdef ContourID *cur, *next
+ cur = self.first
+ while cur != NULL:
+ next = cur.next
+ free(cur)
+ cur = next
+ self.first = self.last = NULL
+
+ def __init__(self):
+ self.first = self.last = NULL
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
+ # This adds new contours, from the given contour IDs, to the tree.
+ # Each one can be connected to a parent, as well as to next/prev in the
+ # set of contours belonging to this tree.
+ cdef int i, n
+ n = contour_ids.shape[0]
+ cdef ContourID *cur = self.last
+ for i in range(n):
+ #print i, contour_ids[i]
+ cur = contour_create(contour_ids[i], cur)
+ if self.first == NULL: self.first = cur
+ self.last = cur
+
+ def add_contour(self, np.int64_t contour_id):
+ self.last = contour_create(contour_id, self.last)
+
+ def cull_candidates(self, np.ndarray[np.int64_t, ndim=3] candidates):
+ # This function looks at each preliminary contour ID belonging to a
+ # given collection of values, and then if need be it creates a new
+ # contour for it.
+ cdef int i, j, k, ni, nj, nk, nc
+ cdef CandidateContour *first = NULL
+ cdef CandidateContour *temp
+ cdef np.int64_t cid
+ nc = 0
+ ni = candidates.shape[0]
+ nj = candidates.shape[1]
+ nk = candidates.shape[2]
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ cid = candidates[i,j,k]
+ if cid == -1: continue
+ if candidate_contains(first, cid) == 0:
+ nc += 1
+ first = candidate_add(first, cid)
+ cdef np.ndarray[np.int64_t, ndim=1] contours
+ contours = np.empty(nc, dtype="int64")
+ i = 0
+ # This removes all the temporary contours for this set of contours and
+ # instead constructs a final list of them.
+ while first != NULL:
+ contours[i] = first.contour_id
+ i += 1
+ temp = first.next
+ free(first)
+ first = temp
+ return contours
+
+ def cull_joins(self, np.ndarray[np.int64_t, ndim=2] cjoins):
+ # This coalesces contour IDs, so that we have only the final name
+ # resolutions -- the .join_id from a candidate. So many items will map
+ # to a single join_id.
+ cdef int i, j, k, ni, nj, nk, nc
+ cdef CandidateContour *first = NULL
+ cdef CandidateContour *temp
+ cdef np.int64_t cid1, cid2
+ nc = 0
+ ni = cjoins.shape[0]
+ for i in range(ni):
+ cid1 = cjoins[i,0]
+ cid2 = cjoins[i,1]
+ if cid1 == -1: continue
+ if cid2 == -1: continue
+ if candidate_contains(first, cid1, cid2) == 0:
+ nc += 1
+ first = candidate_add(first, cid1, cid2)
+ cdef np.ndarray[np.int64_t, ndim=2] contours
+ contours = np.empty((nc,2), dtype="int64")
+ i = 0
+ while first != NULL:
+ contours[i,0] = first.contour_id
+ contours[i,1] = first.join_id
+ i += 1
+ temp = first.next
+ free(first)
+ first = temp
+ return contours
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
+ cdef int i, n, ins
+ cdef np.int64_t cid1, cid2
+ # Okay, this requires lots of iteration, unfortunately
+ cdef ContourID *cur, *root, *c1, *c2
+ n = join_tree.shape[0]
+ #print "Counting"
+ #print "Checking", self.count()
+ for i in range(n):
+ ins = 0
+ cid1 = join_tree[i, 0]
+ cid2 = join_tree[i, 1]
+ c1 = c2 = NULL
+ cur = self.first
+ #print "Looking for ", cid1, cid2
+ while c1 == NULL or c2 == NULL:
+ if cur.contour_id == cid1:
+ c1 = contour_find(cur)
+ if cur.contour_id == cid2:
+ c2 = contour_find(cur)
+ ins += 1
+ cur = cur.next
+ if cur == NULL: break
+ if c1 == NULL or c2 == NULL:
+ if c1 == NULL: print " Couldn't find ", cid1
+ if c2 == NULL: print " Couldn't find ", cid2
+ print " Inspected ", ins
+ raise RuntimeError
+ else:
+ contour_union(c1, c2)
+
+ def count(self):
+ cdef int n = 0
+ cdef ContourID *cur = self.first
+ while cur != NULL:
+ cur = cur.next
+ n += 1
+ return n
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def export(self):
+ cdef int n = self.count()
+ cdef ContourID *cur, *root
+ cur = self.first
+ cdef np.ndarray[np.int64_t, ndim=2] joins
+ joins = np.empty((n, 2), dtype="int64")
+ n = 0
+ while cur != NULL:
+ root = contour_find(cur)
+ joins[n, 0] = cur.contour_id
+ joins[n, 1] = root.contour_id
+ cur = cur.next
+ n += 1
+ return joins
+
def __dealloc__(self):
- cdef int i
- for i in range(self.ncells):
- if self.join_tree[i] != NULL: free(self.join_tree[i])
- free(self.join_tree)
+ self.clear()
- #def construct_join_tree(self,
- # np.ndarray[np.float64_t, ndim=3] field,
- # np.ndarray[np.bool_t, ndim=3] mask):
- # # This only looks at the components of the grid that are actually
- # # inside this grid -- boundary conditions are handled later.
- # pass
+cdef class TileContourTree:
+ cdef np.float64_t min_val
+ cdef np.float64_t max_val
-#@cython.boundscheck(False)
-#@cython.wraparound(False)
-def construct_boundary_relationships(
- np.ndarray[dtype=np.int64_t, ndim=3] contour_ids):
- # We only look at the boundary and one cell in
- cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj
+ def __init__(self, np.float64_t min_val, np.float64_t max_val):
+ self.min_val = min_val
+ self.max_val = max_val
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def identify_contours(self, np.ndarray[np.float64_t, ndim=3] values,
+ np.ndarray[np.int64_t, ndim=3] contour_ids,
+ np.int64_t start):
+ # This just looks at neighbor values and tries to identify which zones
+ # are touching by face within a given brick.
+ cdef int i, j, k, ni, nj, nk, offset
+ cdef int off_i, off_j, off_k, oi, ok, oj
+ cdef ContourID *cur = NULL
+ cdef ContourID *c1, *c2
+ cdef np.float64_t v
+ cdef np.int64_t nc
+ ni = values.shape[0]
+ nj = values.shape[1]
+ nk = values.shape[2]
+ nc = 0
+ cdef ContourID **container = <ContourID**> malloc(
+ sizeof(ContourID*)*ni*nj*nk)
+ for i in range(ni*nj*nk): container[i] = NULL
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ v = values[i,j,k]
+ if v < self.min_val or v > self.max_val: continue
+ nc += 1
+ c1 = contour_create(nc + start)
+ cur = container[i*nj*nk + j*nk + k] = c1
+ for oi in range(3):
+ off_i = oi - 1 + i
+ if not (0 <= off_i < ni): continue
+ for oj in range(3):
+ off_j = oj - 1 + j
+ if not (0 <= off_j < nj): continue
+ for ok in range(3):
+ if oi == oj == ok == 1: continue
+ off_k = ok - 1 + k
+ if off_k > k and off_j > j and off_i > i:
+ continue
+ if not (0 <= off_k < nk): continue
+ offset = off_i*nj*nk + off_j*nk + off_k
+ c2 = container[offset]
+ if c2 == NULL: continue
+ c2 = contour_find(c2)
+ contour_union(cur, c2)
+ cur = contour_find(cur)
+ for i in range(ni):
+ for j in range(nj):
+ for k in range(nk):
+ c1 = container[i*nj*nk + j*nk + k]
+ if c1 == NULL: continue
+ cur = c1
+ c1 = contour_find(c1)
+ contour_ids[i,j,k] = c1.contour_id
+
+ for i in range(ni*nj*nk):
+ if container[i] != NULL: free(container[i])
+ free(container)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def link_node_contours(Node trunk, contours, ContourTree tree,
+ np.ndarray[np.int64_t, ndim=1] node_ids):
+ cdef int n_nodes = node_ids.shape[0]
+ cdef np.int64_t node_ind
+ cdef VolumeContainer **vcs = <VolumeContainer **> malloc(
+ sizeof(VolumeContainer*) * n_nodes)
+ cdef int i
+ cdef PartitionedGrid pg
+ for i in range(n_nodes):
+ pg = contours[node_ids[i]][2]
+ vcs[i] = pg.container
+ cdef np.ndarray[np.uint8_t] examined = np.zeros(n_nodes, "uint8")
+ for nid, cinfo in sorted(contours.items(), key = lambda a: -a[1][0]):
+ level, node_ind, pg, sl = cinfo
+ construct_boundary_relationships(trunk, tree, node_ind,
+ examined, vcs, node_ids)
+ examined[node_ind] = 1
+
+cdef inline void get_spos(VolumeContainer *vc, int i, int j, int k,
+ int axis, np.float64_t *spos):
+ spos[0] = vc.left_edge[0] + i * vc.dds[0]
+ spos[1] = vc.left_edge[1] + j * vc.dds[1]
+ spos[2] = vc.left_edge[2] + k * vc.dds[2]
+ spos[axis] += 0.5 * vc.dds[axis]
+
+cdef inline int spos_contained(VolumeContainer *vc, np.float64_t *spos):
+ cdef int i
+ for i in range(3):
+ if spos[i] < vc.left_edge[i] or spos[i] > vc.right_edge[i]: return 0
+ return 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void construct_boundary_relationships(Node trunk, ContourTree tree,
+ np.int64_t nid, np.ndarray[np.uint8_t, ndim=1] examined,
+ VolumeContainer **vcs,
+ np.ndarray[np.int64_t, ndim=1] node_ids):
+ # We only look at the boundary and find the nodes next to it.
+ # Contours is a dict, keyed by the node.id.
+ cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
cdef np.int64_t c1, c2
- nx = contour_ids.shape[0]
- ny = contour_ids.shape[1]
- nz = contour_ids.shape[2]
+ cdef Node adj_node
+ cdef VolumeContainer *vc1, *vc0 = vcs[nid]
+ nx = vc0.dims[0]
+ ny = vc0.dims[1]
+ nz = vc0.dims[2]
+ cdef int s = (ny*nx + nx*nz + ny*nz) * 18
# We allocate an array of fixed (maximum) size
- cdef int s = (ny*nx + nx*nz + ny*nz - 2) * 18
- cdef np.ndarray[np.int64_t, ndim=2] tree = np.zeros((s, 2), dtype="int64")
+ cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
cdef int ti = 0
- # First x-pass
+ cdef int index
+ cdef np.float64_t spos[3]
+
+ # First the x-pass
for i in range(ny):
for j in range(nz):
for offset_i in range(3):
oi = offset_i - 1
- if i == 0 and oi == -1: continue
- if i == ny - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == nz - 1 and oj == 1: continue
- c1 = contour_ids[0, i, j]
- c2 = contour_ids[1, i + oi, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[nx-1, i, j]
- c2 = contour_ids[nx-2, i + oi, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
+ # Adjust by -1 in x, then oi and oj in y and z
+ get_spos(vc0, -1, i + oi, j + oj, 0, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, 0, i, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ # This is outside our vc
+ get_spos(vc0, nx, i + oi, j + oj, 0, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, nx - 1, i, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
# Now y-pass
for i in range(nx):
for j in range(nz):
@@ -119,43 +447,75 @@
if i == nx - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == nz - 1 and oj == 1: continue
- c1 = contour_ids[i, 0, j]
- c2 = contour_ids[i + oi, 1, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[i, ny-1, j]
- c2 = contour_ids[i + oi, ny-2, j + oj]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
+ get_spos(vc0, i + oi, -1, j + oj, 1, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, 0, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ get_spos(vc0, i + oi, ny, j + oj, 1, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, ny - 1, j)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ # Now z-pass
for i in range(nx):
for j in range(ny):
for offset_i in range(3):
oi = offset_i - 1
- if i == 0 and oi == -1: continue
- if i == nx - 1 and oi == 1: continue
for offset_j in range(3):
oj = offset_j - 1
- if j == 0 and oj == -1: continue
- if j == ny - 1 and oj == 1: continue
- c1 = contour_ids[i, j, 0]
- c2 = contour_ids[i + oi, j + oj, 1]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- c1 = contour_ids[i, j, nz-1]
- c2 = contour_ids[i + oi, j + oj, nz-2]
- if c1 > -1 and c2 > -1:
- tree[ti,0] = i64max(c1,c2)
- tree[ti,1] = i64min(c1,c2)
- ti += 1
- return tree[:ti,:]
+ get_spos(vc0, i + oi, j + oj, -1, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, 0)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+
+ get_spos(vc0, i + oi, j + oj, nz, 2, spos)
+ adj_node = _find_node(trunk, spos)
+ vc1 = vcs[adj_node.node_ind]
+ if examined[adj_node.node_ind] == 0 and \
+ spos_contained(vc1, spos):
+ # This is outside our VC, as 0 is a boundary layer
+ index = vc_index(vc0, i, j, nz - 1)
+ c1 = (<np.int64_t*>vc0.data[0])[index]
+ index = vc_pos_index(vc1, spos)
+ c2 = (<np.int64_t*>vc1.data[0])[index]
+ if c1 > -1 and c2 > -1:
+ joins[ti,0] = i64max(c1,c2)
+ joins[ti,1] = i64min(c1,c2)
+ ti += 1
+ if ti == 0: return
+ new_joins = tree.cull_joins(joins[:ti,:])
+ tree.add_joins(new_joins)
cdef inline int are_neighbors(
np.float64_t x1, np.float64_t y1, np.float64_t z1,
@@ -228,16 +588,23 @@
@cython.boundscheck(False)
@cython.wraparound(False)
-def update_joins(joins, np.ndarray[np.int64_t, ndim=1] contour_ids):
- cdef np.int64_t new, old, i, oi
- cdef int n, on
- cdef np.ndarray[np.int64_t, ndim=1] old_set
- #print contour_ids.shape[0]
- n = contour_ids.shape[0]
- for new, old_set in joins:
- #print new
- on = old_set.shape[0]
- for i in range(n):
- for oi in range(on):
- old = old_set[oi]
- if contour_ids[i] == old: contour_ids[i] = new
+def update_joins(np.ndarray[np.int64_t, ndim=2] joins,
+ np.ndarray[np.int64_t, ndim=3] contour_ids,
+ np.ndarray[np.int64_t, ndim=1] final_joins):
+ cdef np.int64_t new, old
+ cdef int i, j, nj, nf
+ cdef int ci, cj, ck
+ nj = joins.shape[0]
+ nf = final_joins.shape[0]
+ for ci in range(contour_ids.shape[0]):
+ for cj in range(contour_ids.shape[1]):
+ for ck in range(contour_ids.shape[2]):
+ if contour_ids[ci,cj,ck] == -1: continue
+ for j in range(nj):
+ if contour_ids[ci,cj,ck] == joins[j,0]:
+ contour_ids[ci,cj,ck] = joins[j,1]
+ break
+ for j in range(nf):
+ if contour_ids[ci,cj,ck] == final_joins[j]:
+ contour_ids[ci,cj,ck] = j + 1
+ break
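ContourTree above implements union-find over a linked list of contours. The same find-with-path-compression / union-by-smaller-id logic, restated as a pure-Python sketch (the example ids come from the class comment in the diff):

    parent = {}

    def find(cid):
        # Walk up to the root, then compress the path below it.
        root = cid
        while parent.get(root, root) != root:
            root = parent[root]
        while parent.get(cid, cid) != root:
            parent[cid], cid = root, parent[cid]
        return root

    def union(c1, c2):
        # The smaller contour id wins, matching contour_union above.
        r1, r2 = find(c1), find(c2)
        if r1 < r2:
            parent[r2] = r1
        elif r2 < r1:
            parent[r1] = r2

    union(202483, 2472)
    union(2472, 143)
    assert find(202483) == 143  # joins resolve to the canonical id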
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/utilities/lib/amr_kdtools.pxd
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -0,0 +1,39 @@
+"""
+AMR kD-Tree Cython Tools
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+
+cdef struct Split:
+ int dim
+ np.float64_t pos
+
+cdef class Node
+
+cdef class Node:
+ cdef public Node left
+ cdef public Node right
+ cdef public Node parent
+ cdef public int grid
+ cdef public np.int64_t node_id
+ cdef public np.int64_t node_ind
+ cdef np.float64_t left_edge[3]
+ cdef np.float64_t right_edge[3]
+ cdef public data
+ cdef Split * split
+ cdef int level
+
+cdef int point_in_node(Node node, np.ndarray[np.float64_t, ndim=1] point)
+cdef Node _find_node(Node node, np.float64_t *point)
+cdef int _kd_is_leaf(Node node)
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -25,25 +25,11 @@
DEF Nch = 4
-cdef struct Split:
- int dim
- np.float64_t pos
-
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cdef class Node:
- cdef public Node left
- cdef public Node right
- cdef public Node parent
- cdef public int grid
- cdef public np.int64_t node_id
- cdef np.float64_t left_edge[3]
- cdef np.float64_t right_edge[3]
- cdef public data
- cdef Split * split
-
def __cinit__(self,
Node parent,
Node left,
@@ -152,11 +138,11 @@
def kd_traverse(Node trunk, viewpoint=None):
if viewpoint is None:
for node in depth_traverse(trunk):
- if kd_is_leaf(node) and node.grid != -1:
+ if _kd_is_leaf(node) == 1 and node.grid != -1:
yield node
else:
for node in viewpoint_traverse(trunk, viewpoint):
- if kd_is_leaf(node) and node.grid != -1:
+ if _kd_is_leaf(node) == 1 and node.grid != -1:
yield node
@cython.boundscheck(False)
@@ -172,7 +158,7 @@
if not should_i_build(node, rank, size):
return
- if kd_is_leaf(node):
+ if _kd_is_leaf(node) == 1:
insert_grid(node, gle, gre, gid, rank, size)
else:
less_id = gle[node.split.dim] < node.split.pos
@@ -295,7 +281,7 @@
if not should_i_build(node, rank, size):
return
- if kd_is_leaf(node):
+ if _kd_is_leaf(node) == 1:
insert_grids(node, ngrids, gles, gres, gids, rank, size)
return
@@ -766,11 +752,16 @@
assert has_l_child == has_r_child
return has_l_child
+cdef int _kd_is_leaf(Node node):
+ if node.left is None or node.right is None:
+ return 1
+ return 0
+
def step_depth(Node current, Node previous):
'''
Takes a single step in the depth-first traversal
'''
- if kd_is_leaf(current): # At a leaf, move back up
+ if _kd_is_leaf(current) == 1: # At a leaf, move back up
previous = current
current = current.parent
@@ -862,7 +853,7 @@
Takes a single step in the viewpoint based traversal. Always
goes to the node furthest away from viewpoint first.
'''
- if kd_is_leaf(current): # At a leaf, move back up
+ if _kd_is_leaf(current) == 1: # At a leaf, move back up
previous = current
current = current.parent
elif current.split.dim is None: # This is a dead node
@@ -913,6 +904,13 @@
inside *= node.right_edge[i] > point[i]
return inside
+cdef Node _find_node(Node node, np.float64_t *point):
+ while _kd_is_leaf(node) == 0:
+ if point[node.split.dim] < node.split.pos:
+ node = node.left
+ else:
+ node = node.right
+ return node
def find_node(Node node,
np.ndarray[np.float64_t, ndim=1] point):
@@ -920,12 +918,5 @@
Find the AMRKDTree node enclosing a position
"""
assert(point_in_node(node, point))
- while not kd_is_leaf(node):
- if point[node.split.dim] < node.split.pos:
- node = node.left
- else:
- node = node.right
- return node
+ return _find_node(node, <np.float64_t *> point.data)
-
-
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -17,6 +17,7 @@
import numpy as np
cimport numpy as np
cimport cython
+cimport kdtree_utils
cdef struct VolumeContainer:
int n_fields
@@ -29,6 +30,20 @@
np.float64_t idds[3]
int dims[3]
+cdef class PartitionedGrid:
+ cdef public object my_data
+ cdef public object source_mask
+ cdef public object LeftEdge
+ cdef public object RightEdge
+ cdef public int parent_grid_id
+ cdef VolumeContainer *container
+ cdef kdtree_utils.kdtree *star_list
+ cdef np.float64_t star_er
+ cdef np.float64_t star_sigma_num
+ cdef np.float64_t star_coeff
+ cdef void get_vector_field(self, np.float64_t pos[3],
+ np.float64_t *vel, np.float64_t *vel_mag)
+
ctypedef void sample_function(
VolumeContainer *vc,
np.float64_t v_pos[3],
@@ -45,3 +60,12 @@
void *data,
np.float64_t *return_t = *,
np.float64_t enter_t = *) nogil
+
+cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
+ return (i*vc.dims[1]+j)*vc.dims[2]+k
+
+cdef inline int vc_pos_index(VolumeContainer *vc, np.float64_t *spos):
+ cdef int i, index[3]
+ for i in range(3):
+ index[i] = <int> ((spos[i] - vc.left_edge[i]) * vc.idds[i])
+ return vc_index(vc, index[0], index[1], index[2])
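The two inline helpers added above are just row-major flattening and position-to-cell lookup. A numpy sketch of the same arithmetic, with invented dimensions and edges:

    import numpy as np

    dims = np.array([8, 8, 8])
    left_edge = np.zeros(3)
    dds = np.ones(3) / dims   # cell widths
    idds = 1.0 / dds          # inverse widths, as in VolumeContainer

    def vc_index(i, j, k):
        # Row-major flattening, identical to the .pxd helper above.
        return (i * dims[1] + j) * dims[2] + k

    def vc_pos_index(spos):
        i, j, k = ((spos - left_edge) * idds).astype("int")
        return vc_index(i, j, k)

    assert vc_pos_index(np.array([0.5, 0.5, 0.5])) == vc_index(4, 4, 4)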
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -16,7 +16,6 @@
import numpy as np
cimport numpy as np
cimport cython
-cimport kdtree_utils
#cimport healpix_interface
from libc.stdlib cimport malloc, free, abs
from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -58,16 +57,6 @@
void *data) nogil
cdef class PartitionedGrid:
- cdef public object my_data
- cdef public object source_mask
- cdef public object LeftEdge
- cdef public object RightEdge
- cdef public int parent_grid_id
- cdef VolumeContainer *container
- cdef kdtree_utils.kdtree *star_list
- cdef np.float64_t star_er
- cdef np.float64_t star_sigma_num
- cdef np.float64_t star_coeff
@cython.boundscheck(False)
@cython.wraparound(False)
diff -r c62c3cf06c207631c1c155d0de64ea9eb92fe711 -r 831c3d55fb61b11b8686e6c642769ef6306e8d70 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -65,7 +65,9 @@
["yt/utilities/lib/ContourFinding.pyx",
"yt/utilities/lib/union_find.c"],
include_dirs=["yt/utilities/lib/"],
- libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+ libraries=["m"],
+ depends=["yt/utilities/lib/fp_utils.pxd",
+ "yt/utilities/lib/amr_kdtools.pxd"])
config.add_extension("DepthFirstOctree",
["yt/utilities/lib/DepthFirstOctree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
Repository URL: https://bitbucket.org/yt_analysis/yt/