[yt-svn] commit/yt-3.0: 8 new changesets

commits-noreply at bitbucket.org
Tue Nov 26 06:32:13 PST 2013


8 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/cab93992e337/
Changeset:   cab93992e337
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 19:56:38
Summary:     First import of HTTPStream particle handler.
Affected #:  1 file

diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r cab93992e3375b3662ccec753462dbecc3cae6a1 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -28,6 +28,11 @@
 
 from yt.geometry.oct_container import _ORDER_MAX
 
+try:
+    import requests
+except ImportError:
+    requests = None
+
 CHUNKSIZE = 10000000
 
 def _get_h5_handle(fn):
@@ -532,3 +537,75 @@
             size = self._pdtypes[ptype].itemsize
             pos += data_file.total_particles[ptype] * size
         return field_offsets
+
+class IOHandlerHTTPStream(BaseIOHandler):
+    def __init__(self, pf):
+        if requests is None:
+            raise RuntimeError
+        self._url = pf.base_url 
+        # This should eventually manage the IO and cache it
+
+    def _open_stream(self, data_file, field):
+        # This does not actually stream yet!
+        ftype, fname = field
+        s = "%s/%s/%s/%s" % (self._url,
+            data_file.http_url, 
+            ftype, fname)
+        resp = requests.get(s)
+        if resp.status_code != 200:
+            raise RuntimeError
+        return resp.content
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            for ptype in ptf:
+                s = self._open_stream(data_file, (ptype, "Coordinates"))
+                c = np.frombuffer(s, dtype="float64")
+                c.shape = (c.shape[0]/3.0, 3)
+                yield ptype, (c[:,0], c[:,1], c[:,2])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            for ptype, field_list in sorted(ptf.items()):
+                s = self._open_stream(data_file, (ptype, "Coordinates"))
+                c = np.frombuffer(s, dtype="float64")
+                c.shape = (c.shape[0]/3.0, 3)
+                mask = selector.select_points(
+                            c[:,0], c[:,1], c[:,2])
+                del c
+                if mask is None: continue
+                for field in field_list:
+                    s = self._open_stream(data_file, (ptype, field))
+                    c = np.frombuffer(s, dtype="float64")
+                    if field in ("Coordinates", "Velocities"):
+                        c.shape = (c.shape[0]/3.0, 3)
+                    data = c[mask, ...]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        ptypes = self.pf.json_header["particle_types"]
+        pcount = sum(self.pf.json_header["particle_count"][ptype]
+                     for ptype in ptypes)
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        for ptype in ptypes:
+            s = self._open_stream(data_file, (ptype, "Coordinates"))
+            c = np.frombuffer(s, dtype="float64")
+            c.shape = (c.shape[0]/3.0, 3)
+            regions.add_data_file(c, data_file.file_id)
+            morton[ind:ind+c.shape[0]] = compute_morton(
+                c[:,0], c[:,1], c[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += c.shape[0]
+        return morton
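
For orientation, the handler above amounts to the following standalone sketch, assuming a server that answers <base_url>/<identifier>/<ptype>/<field> with the raw float64 bytes of an (N, 3) array. The function name, host, and field names are illustrative; the reshape below uses floor division to keep the shape integral, whereas the diff's c.shape[0]/3.0 produces a float under Python 2 division.

    import numpy as np
    import requests

    def fetch_vector_field(base_url, identifier, ptype, fname):
        # Build the URL the same way _open_stream does and pull the
        # whole payload in one GET; there is no true streaming yet.
        url = "%s/%s/%s/%s" % (base_url, identifier, ptype, fname)
        resp = requests.get(url)
        if resp.status_code != 200:
            raise RuntimeError("failed to fetch %s" % url)
        # The body is raw float64 bytes; reshape to (N, 3).
        c = np.frombuffer(resp.content, dtype="float64")
        return c.reshape((c.shape[0] // 3, 3))

    # Hypothetical usage:
    #   pos = fetch_vector_field("http://localhost:8080", 0,
    #                            "PartType0", "Coordinates")
    #   x, y, z = pos[:, 0], pos[:, 1], pos[:, 2]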


https://bitbucket.org/yt_analysis/yt-3.0/commits/ec1ca9a24530/
Changeset:   ec1ca9a24530
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 20:12:10
Summary:     First attempt at HTTP Stream Particle Static Output
Affected #:  1 file

diff -r cab93992e3375b3662ccec753462dbecc3cae6a1 -r ec1ca9a245305f602500250b9f63c01090ed0da9 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -50,6 +50,11 @@
     particle_deposition_functions, \
     standard_particle_fields
 
+try:
+    import requests
+    import json
+except ImportError:
+    requests = None
 
 class ParticleFile(object):
     def __init__(self, pf, io, filename, file_id):
@@ -550,3 +555,75 @@
     def _is_valid(self, *args, **kwargs):
         # We do not allow load() of these files.
         return False
+
+class HTTPParticleFile(ParticleFile):
+    pass
+
+class HTTPStreamStaticOutput(ParticleStaticOutput):
+    _hierarchy_class = ParticleGeometryHandler
+    _file_class = HTTPParticleFile
+    _fieldinfo_fallback = GadgetFieldInfo
+    _fieldinfo_known = KnownGadgetFields
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
+    _particle_velocity_name = "Velocities"
+    
+    def __init__(self, base_url,
+                 data_style = "http_particle_stream",
+                 n_ref = 64, over_refine_factor=1):
+        if requests is None:
+            raise RuntimeError
+        self.base_url = base_url
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        super(HTTPStreamStaticOutput, self).__init__("", data_style)
+
+    def __repr__(self):
+        return self.base_url
+
+    def _parse_parameter_file(self):
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = "sph"
+
+        # Here's where we're going to grab the JSON index file
+        hreq = requests.get(self.base_url + "/yt_index.json")
+        if hreq.status != 200:
+            raise RuntimeError
+        header = json.loads(hreq.content)
+        self.parameters = header
+
+        # Now we get what we need
+        self.domain_left_edge = np.array(header['domain_left_edge'], "float64")
+        self.domain_right_edge = np.array(header['domain_right_edge'], "float64")
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.periodicity = (True, True, True)
+
+        self.unique_identifier = header.get("unique_identifier", time.time())
+        self.cosmological_simulation = int(header['cosmological_simulation'])
+        for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
+                     'hubble_constant'):
+            setattr(self, attr, float(header[attr]))
+
+        self.file_count = header['num_files']
+
+    def _set_units(self):
+        length_unit = float(self.parameters['units']['length'])
+        time_unit = float(self.parameters['units']['time'])
+        mass_unit = float(self.parameters['units']['mass'])
+        velocity_unit = length_unit / time_unit
+        self.conversion_factors["velocity"] = velocity_unit
+        self.conversion_factors["mass"] = mass_unit
+        self.conversion_factors["density"] = density_unit
+        self._unit_base['cm'] = length_unit
+        self._unit_base['s'] = time_unit
+        for u in sec_conversion:
+            self.time_units[u] = time_unit * sec_conversion[u]
+        super(HTTPStreamStaticOutput, self)._set_units()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if args[0].startswith("http://"):
+            return True
+        return False
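
Taken together, _parse_parameter_file and _set_units imply an index file of roughly the following shape; every value below is a placeholder. (Two things worth flagging, both corrected later in this series: requests responses expose status_code rather than status, and density_unit is referenced in _set_units before it is defined.)

    # Hypothetical yt_index.json payload, inferred from the keys the
    # frontend reads; all numbers are illustrative.
    yt_index = {
        "domain_left_edge": [0.0, 0.0, 0.0],
        "domain_right_edge": [1.0, 1.0, 1.0],
        "cosmological_simulation": 1,
        "current_redshift": 0.0,
        "omega_lambda": 0.7,
        "omega_matter": 0.3,
        "hubble_constant": 0.7,
        "num_files": 1,
        "units": {"length": 3.0857e24,    # cm
                  "time": 3.1557e13,      # s
                  "mass": 1.989e43},      # g
    }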


https://bitbucket.org/yt_analysis/yt-3.0/commits/51f5d45de403/
Changeset:   51f5d45de403
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 20:54:41
Summary:     I am disabling this presently, as Nathan's fix in mainline is better.
Affected #:  1 file

diff -r ec1ca9a245305f602500250b9f63c01090ed0da9 -r 51f5d45de403cac2366ab75bfb98574ea2a86e1f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
                 obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
-        if not os.path.exists(apath): raise IOError(filename)
+        #if not os.path.exists(apath): raise IOError(filename)
         if apath not in _cached_pfs:
             obj = object.__new__(cls)
             if obj._skip_cache is False:
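
The check has to be relaxed because URLs cannot survive os.path.abspath; it joins them onto the working directory and collapses the double slash, yielding a local path that never exists:

    >>> import os
    >>> os.chdir("/tmp")
    >>> os.path.abspath("http://localhost:8080/data")
    '/tmp/http:/localhost:8080/data'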


https://bitbucket.org/yt_analysis/yt-3.0/commits/5a128868b387/
Changeset:   5a128868b387
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 21:10:43
Summary:     Enabling "http" to skip isfile checks.
Affected #:  1 file

diff -r 51f5d45de403cac2366ab75bfb98574ea2a86e1f -r 5a128868b387043d9147b927a0a5364f717a0407 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
         if isinstance(arg, types.StringTypes):
             if os.path.exists(arg):
                 valid_file.append(True)
+            elif arg.startswith("http"):
+                valid_file.append(True)
             else:
                 if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
                     valid_file.append(True)
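
With the filesystem checks relaxed here and in the previous changeset, load() can be handed a bare URL. A minimal usage sketch, with a made-up host and port:

    from yt.mods import load

    # Anything beginning with "http" now passes the validity test in
    # yt/convenience.py; HTTPStreamStaticOutput._is_valid then claims
    # the argument via its "http://" prefix.
    pf = load("http://localhost:8080")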


https://bitbucket.org/yt_analysis/yt-3.0/commits/580c10dc8eaf/
Changeset:   580c10dc8eaf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 21:12:15
Summary:     First working version of HTTP particle streaming.
Affected #:  2 files

diff -r 5a128868b387043d9147b927a0a5364f717a0407 -r 580c10dc8eaf5c6bce40f3aeb8e2090b39bcc73c yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -20,6 +20,7 @@
 import weakref
 import struct
 import glob
+import time
 import os
 
 from yt.utilities.fortran_utils import read_record
@@ -567,6 +568,7 @@
     _particle_mass_name = "Mass"
     _particle_coordinates_name = "Coordinates"
     _particle_velocity_name = "Velocities"
+    filename_template = ""
     
     def __init__(self, base_url,
                  data_style = "http_particle_stream",
@@ -588,9 +590,11 @@
 
         # Here's where we're going to grab the JSON index file
         hreq = requests.get(self.base_url + "/yt_index.json")
-        if hreq.status != 200:
+        if hreq.status_code != 200:
             raise RuntimeError
         header = json.loads(hreq.content)
+        header['particle_count'] = dict((int(k), header['particle_count'][k])
+            for k in header['particle_count'])
         self.parameters = header
 
         # Now we get what we need
@@ -600,6 +604,7 @@
         self.domain_dimensions = np.ones(3, "int32") * nz
         self.periodicity = (True, True, True)
 
+        self.current_time = header['current_time']
         self.unique_identifier = header.get("unique_identifier", time.time())
         self.cosmological_simulation = int(header['cosmological_simulation'])
         for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
@@ -612,15 +617,15 @@
         length_unit = float(self.parameters['units']['length'])
         time_unit = float(self.parameters['units']['time'])
         mass_unit = float(self.parameters['units']['mass'])
+        density_unit = mass_unit / length_unit ** 3
         velocity_unit = length_unit / time_unit
+        self._unit_base = {}
+        self._unit_base['cm'] = length_unit
+        self._unit_base['s'] = time_unit
+        super(HTTPStreamStaticOutput, self)._set_units()
         self.conversion_factors["velocity"] = velocity_unit
         self.conversion_factors["mass"] = mass_unit
         self.conversion_factors["density"] = density_unit
-        self._unit_base['cm'] = length_unit
-        self._unit_base['s'] = time_unit
-        for u in sec_conversion:
-            self.time_units[u] = time_unit * sec_conversion[u]
-        super(HTTPStreamStaticOutput, self)._set_units()
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
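
One detail worth spelling out in the hunk above: JSON object keys are always strings, so integer file ids come back from json.loads as "0", "1", and so on, and must be re-cast before an int data_file.file_id can index them. An illustrative round trip:

    import json

    header = json.loads('{"particle_count": {"0": {"PartType0": 128}}}')
    # Recast the string keys to the int file ids used for lookups,
    # exactly as the dict((int(k), ...)) expression above does.
    counts = dict((int(k), header["particle_count"][k])
                  for k in header["particle_count"])
    assert counts[0] == {"PartType0": 128}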

diff -r 5a128868b387043d9147b927a0a5364f717a0407 -r 580c10dc8eaf5c6bce40f3aeb8e2090b39bcc73c yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -539,23 +539,33 @@
         return field_offsets
 
 class IOHandlerHTTPStream(BaseIOHandler):
+    _data_style = "http_particle_stream"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
+    
     def __init__(self, pf):
         if requests is None:
             raise RuntimeError
         self._url = pf.base_url 
         # This should eventually manage the IO and cache it
+        super(IOHandlerHTTPStream, self).__init__(pf)
 
     def _open_stream(self, data_file, field):
         # This does not actually stream yet!
         ftype, fname = field
         s = "%s/%s/%s/%s" % (self._url,
-            data_file.http_url, 
-            ftype, fname)
+            data_file.file_id, ftype, fname)
+        mylog.info("Loading URL %s", s)
         resp = requests.get(s)
         if resp.status_code != 200:
             raise RuntimeError
         return resp.content
 
+    def _identify_fields(self, data_file):
+        f = []
+        for ftype, fname in self.pf.parameters["field_list"]:
+            f.append((str(ftype), str(fname)))
+        return f
+
     def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
         data_files = set([])
@@ -593,9 +603,9 @@
                     yield (ptype, field), data
 
     def _initialize_index(self, data_file, regions):
-        ptypes = self.pf.json_header["particle_types"]
-        pcount = sum(self.pf.json_header["particle_count"][ptype]
-                     for ptype in ptypes)
+        header = self.pf.parameters
+        ptypes = header["particle_count"][data_file.file_id].keys()
+        pcount = sum(header["particle_count"][data_file.file_id].values())
         morton = np.empty(pcount, dtype='uint64')
         ind = 0
         for ptype in ptypes:
@@ -609,3 +619,6 @@
                 data_file.pf.domain_right_edge)
             ind += c.shape[0]
         return morton
+
+    def _count_particles(self, data_file):
+        return self.pf.parameters["particle_count"][data_file.file_id]
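
Together these IO pieces imply a simple server contract: one yt_index.json at the root, plus a raw float64 blob per (file id, particle type, field) at <base_url>/<file_id>/<ptype>/<field>. As a sketch, a tree like the one written below, served by any static HTTP server, should satisfy that contract; the names, counts, and values are all invented:

    import json
    import os

    import numpy as np

    root = "served_dataset"                        # serve this over HTTP
    pdir = os.path.join(root, "0", "PartType0")    # file_id 0, one ptype
    if not os.path.isdir(pdir):
        os.makedirs(pdir)

    # Raw native-endian float64 bytes, which is what np.frombuffer
    # decodes on the client side.
    pos = np.random.random((128, 3))
    pos.astype("float64").tofile(os.path.join(pdir, "Coordinates"))

    header = {
        "particle_count": {"0": {"PartType0": 128}},
        "field_list": [["PartType0", "Coordinates"]],
        "num_files": 1,
        "current_time": 0.0,
        "domain_left_edge": [0.0, 0.0, 0.0],
        "domain_right_edge": [1.0, 1.0, 1.0],
        "cosmological_simulation": 0,
        "current_redshift": 0.0,
        "omega_lambda": 0.7,
        "omega_matter": 0.3,
        "hubble_constant": 0.7,
        "units": {"length": 1.0, "time": 1.0, "mass": 1.0},
    }
    with open(os.path.join(root, "yt_index.json"), "w") as f:
        json.dump(header, f)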


https://bitbucket.org/yt_analysis/yt-3.0/commits/f8bbe914dd12/
Changeset:   f8bbe914dd12
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 21:30:52
Summary:     This initializes the units correctly for HTTP particles.
Affected #:  2 files

diff -r 580c10dc8eaf5c6bce40f3aeb8e2090b39bcc73c -r f8bbe914dd12559e403a7ec603160ea25d29e4c5 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -620,8 +620,8 @@
         density_unit = mass_unit / length_unit ** 3
         velocity_unit = length_unit / time_unit
         self._unit_base = {}
-        self._unit_base['cm'] = length_unit
-        self._unit_base['s'] = time_unit
+        self._unit_base['cm'] = 1.0/length_unit
+        self._unit_base['s'] = 1.0/time_unit
         super(HTTPStreamStaticOutput, self)._set_units()
         self.conversion_factors["velocity"] = velocity_unit
         self.conversion_factors["mass"] = mass_unit

diff -r 580c10dc8eaf5c6bce40f3aeb8e2090b39bcc73c -r f8bbe914dd12559e403a7ec603160ea25d29e4c5 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -597,7 +597,7 @@
                 for field in field_list:
                     s = self._open_stream(data_file, (ptype, field))
                     c = np.frombuffer(s, dtype="float64")
-                    if field in ("Coordinates", "Velocities"):
+                    if field in self._vector_fields:
                         c.shape = (c.shape[0]/3.0, 3)
                     data = c[mask, ...]
                     yield (ptype, field), data
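
The direction of the fix suggests _unit_base stores code units per cgs unit rather than cgs units per code unit. A small calculation under that assumption, with an illustrative length unit of 1 Mpc:

    length_unit = 3.0857e24              # cm per code unit (1 Mpc)
    unit_base_cm = 1.0 / length_unit     # code units per cm, as above
    # Converting a physical length of 5 Mpc, expressed in cm, back
    # into code units:
    x_code = 5 * 3.0857e24 * unit_base_cm    # -> 5.0 code units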


https://bitbucket.org/yt_analysis/yt-3.0/commits/db926677a1f4/
Changeset:   db926677a1f4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-19 21:47:42
Summary:     Adding a counter for how much data has been streamed.
Affected #:  1 file

diff -r f8bbe914dd12559e403a7ec603160ea25d29e4c5 -r db926677a1f44bced900b0f4f5484f5aac80c4e2 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -547,6 +547,7 @@
             raise RuntimeError
         self._url = pf.base_url 
         # This should eventually manage the IO and cache it
+        self.total_bytes = 0
         super(IOHandlerHTTPStream, self).__init__(pf)
 
     def _open_stream(self, data_file, field):
@@ -558,6 +559,7 @@
         resp = requests.get(s)
         if resp.status_code != 200:
             raise RuntimeError
+        self.total_bytes += len(resp.content)
         return resp.content
 
     def _identify_fields(self, data_file):
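
A sketch of how the counter might be inspected once some data has been read; the pf.h.io path to the IO handler is an assumption about the geometry handler's attributes, not something this diff establishes:

    # After a query has pulled fields over HTTP:
    io = pf.h.io   # hypothetical handle on the IOHandlerHTTPStream
    print "streamed %0.1f MB so far" % (io.total_bytes / (1024.0 * 1024.0))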


https://bitbucket.org/yt_analysis/yt-3.0/commits/dfaa8f1d5f96/
Changeset:   dfaa8f1d5f96
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-11-26 15:32:01
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #136)

HTTP loading for particles
Affected #:  4 files

diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r dfaa8f1d5f9622a8db64e1ec39f9d280788d04df yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
         if isinstance(arg, types.StringTypes):
             if os.path.exists(arg):
                 valid_file.append(True)
+            elif arg.startswith("http"):
+                valid_file.append(True)
             else:
                 if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
                     valid_file.append(True)

diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r dfaa8f1d5f9622a8db64e1ec39f9d280788d04df yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
                 obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
-        if not os.path.exists(apath): raise IOError(filename)
+        #if not os.path.exists(apath): raise IOError(filename)
         if apath not in _cached_pfs:
             obj = object.__new__(cls)
             if obj._skip_cache is False:

diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r dfaa8f1d5f9622a8db64e1ec39f9d280788d04df yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -20,6 +20,7 @@
 import weakref
 import struct
 import glob
+import time
 import os
 
 from yt.utilities.fortran_utils import read_record
@@ -50,6 +51,11 @@
     particle_deposition_functions, \
     standard_particle_fields
 
+try:
+    import requests
+    import json
+except ImportError:
+    requests = None
 
 class ParticleFile(object):
     def __init__(self, pf, io, filename, file_id):
@@ -550,3 +556,79 @@
     def _is_valid(self, *args, **kwargs):
         # We do not allow load() of these files.
         return False
+
+class HTTPParticleFile(ParticleFile):
+    pass
+
+class HTTPStreamStaticOutput(ParticleStaticOutput):
+    _hierarchy_class = ParticleGeometryHandler
+    _file_class = HTTPParticleFile
+    _fieldinfo_fallback = GadgetFieldInfo
+    _fieldinfo_known = KnownGadgetFields
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
+    _particle_velocity_name = "Velocities"
+    filename_template = ""
+    
+    def __init__(self, base_url,
+                 data_style = "http_particle_stream",
+                 n_ref = 64, over_refine_factor=1):
+        if requests is None:
+            raise RuntimeError
+        self.base_url = base_url
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        super(HTTPStreamStaticOutput, self).__init__("", data_style)
+
+    def __repr__(self):
+        return self.base_url
+
+    def _parse_parameter_file(self):
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = "sph"
+
+        # Here's where we're going to grab the JSON index file
+        hreq = requests.get(self.base_url + "/yt_index.json")
+        if hreq.status_code != 200:
+            raise RuntimeError
+        header = json.loads(hreq.content)
+        header['particle_count'] = dict((int(k), header['particle_count'][k])
+            for k in header['particle_count'])
+        self.parameters = header
+
+        # Now we get what we need
+        self.domain_left_edge = np.array(header['domain_left_edge'], "float64")
+        self.domain_right_edge = np.array(header['domain_right_edge'], "float64")
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.periodicity = (True, True, True)
+
+        self.current_time = header['current_time']
+        self.unique_identifier = header.get("unique_identifier", time.time())
+        self.cosmological_simulation = int(header['cosmological_simulation'])
+        for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
+                     'hubble_constant'):
+            setattr(self, attr, float(header[attr]))
+
+        self.file_count = header['num_files']
+
+    def _set_units(self):
+        length_unit = float(self.parameters['units']['length'])
+        time_unit = float(self.parameters['units']['time'])
+        mass_unit = float(self.parameters['units']['mass'])
+        density_unit = mass_unit / length_unit ** 3
+        velocity_unit = length_unit / time_unit
+        self._unit_base = {}
+        self._unit_base['cm'] = 1.0/length_unit
+        self._unit_base['s'] = 1.0/time_unit
+        super(HTTPStreamStaticOutput, self)._set_units()
+        self.conversion_factors["velocity"] = velocity_unit
+        self.conversion_factors["mass"] = mass_unit
+        self.conversion_factors["density"] = density_unit
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if args[0].startswith("http://"):
+            return True
+        return False

diff -r 6e029bcb0dbf8680278737524cce19e3365cbea1 -r dfaa8f1d5f9622a8db64e1ec39f9d280788d04df yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -28,6 +28,11 @@
 
 from yt.geometry.oct_container import _ORDER_MAX
 
+try:
+    import requests
+except ImportError:
+    requests = None
+
 CHUNKSIZE = 10000000
 
 def _get_h5_handle(fn):
@@ -543,3 +548,90 @@
             size = self._pdtypes[ptype].itemsize
             pos += data_file.total_particles[ptype] * size
         return field_offsets
+
+class IOHandlerHTTPStream(BaseIOHandler):
+    _data_style = "http_particle_stream"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
+    
+    def __init__(self, pf):
+        if requests is None:
+            raise RuntimeError
+        self._url = pf.base_url 
+        # This should eventually manage the IO and cache it
+        self.total_bytes = 0
+        super(IOHandlerHTTPStream, self).__init__(pf)
+
+    def _open_stream(self, data_file, field):
+        # This does not actually stream yet!
+        ftype, fname = field
+        s = "%s/%s/%s/%s" % (self._url,
+            data_file.file_id, ftype, fname)
+        mylog.info("Loading URL %s", s)
+        resp = requests.get(s)
+        if resp.status_code != 200:
+            raise RuntimeError
+        self.total_bytes += len(resp.content)
+        return resp.content
+
+    def _identify_fields(self, data_file):
+        f = []
+        for ftype, fname in self.pf.parameters["field_list"]:
+            f.append((str(ftype), str(fname)))
+        return f
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            for ptype in ptf:
+                s = self._open_stream(data_file, (ptype, "Coordinates"))
+                c = np.frombuffer(s, dtype="float64")
+                c.shape = (c.shape[0]/3.0, 3)
+                yield ptype, (c[:,0], c[:,1], c[:,2])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            for ptype, field_list in sorted(ptf.items()):
+                s = self._open_stream(data_file, (ptype, "Coordinates"))
+                c = np.frombuffer(s, dtype="float64")
+                c.shape = (c.shape[0]/3.0, 3)
+                mask = selector.select_points(
+                            c[:,0], c[:,1], c[:,2])
+                del c
+                if mask is None: continue
+                for field in field_list:
+                    s = self._open_stream(data_file, (ptype, field))
+                    c = np.frombuffer(s, dtype="float64")
+                    if field in self._vector_fields:
+                        c.shape = (c.shape[0]/3.0, 3)
+                    data = c[mask, ...]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        header = self.pf.parameters
+        ptypes = header["particle_count"][data_file.file_id].keys()
+        pcount = sum(header["particle_count"][data_file.file_id].values())
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        for ptype in ptypes:
+            s = self._open_stream(data_file, (ptype, "Coordinates"))
+            c = np.frombuffer(s, dtype="float64")
+            c.shape = (c.shape[0]/3.0, 3)
+            regions.add_data_file(c, data_file.file_id)
+            morton[ind:ind+c.shape[0]] = compute_morton(
+                c[:,0], c[:,1], c[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += c.shape[0]
+        return morton
+
+    def _count_particles(self, data_file):
+        return self.pf.parameters["particle_count"][data_file.file_id]
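
Putting the merged pull request together, an end-to-end session might look like the following; the URL is invented, and the server is assumed to follow the layout sketched after the "First working version" changeset above:

    from yt.mods import load

    pf = load("http://localhost:8080")    # claimed via the http:// prefix
    dd = pf.h.all_data()
    # Each (particle type, field) pair listed in the index's field_list
    # is fetched on demand, one GET per data file:
    coords = dd["PartType0", "Coordinates"]
    print coords.shape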

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


