[yt-svn] commit/yt: 14 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Sun Jun 4 08:23:49 PDT 2017


14 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/6df9e1ba6ccb/
Changeset:   6df9e1ba6ccb
User:        jzuhone
Date:        2017-05-26 18:55:50+00:00
Summary:     Begin refactor of sanity checking of data dimensions and determination of number_of_particles
Affected #:  1 file

diff -r 2848c2031acc2b429d4effd45c4abf70d215cc80 -r 6df9e1ba6ccbabfbf032837d1a2957bab2f7ea6e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -22,6 +22,7 @@
     chain, \
     product, \
     repeat
+from collections import defaultdict
 
 from numbers import Number as numeric_type
 
@@ -275,9 +276,8 @@
         self.ds._find_particle_types()
 
         for i, grid in enumerate(self.grids):
-            if "number_of_particles" in data[i]:
-                self.stream_handler.particle_count[i] = data[i].pop("number_of_particles")
-            field_units, gdata = unitify_data(data[i])
+            field_units, gdata, number_of_particles = process_data(data[i])
+            self.stream_handler.particle_count[i] = number_of_particles
             self.stream_handler.field_units.update(field_units)
             for field in gdata:
                 if field in grid.field_data:
@@ -485,7 +485,7 @@
         npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0)
         ds.stream_handler.particle_count[gi] = npart
 
-def unitify_data(data):
+def process_data(data, grid_dims=None):
     new_data, field_units = {}, {}
     for field, val in data.items():
         # val is a data array
@@ -529,11 +529,12 @@
     # At this point, we have arrays for all our fields
     new_data = {}
     for field in data:
+        n_shape = len(data[field].shape)
         if isinstance(field, tuple):
             new_field = field
-        elif len(data[field].shape) in (1, 2):
+        elif n_shape in (1, 2):
             new_field = ("io", field)
-        elif len(data[field].shape) == 3:
+        elif n_shape == 3:
             new_field = ("stream", field)
         else:
             raise RuntimeError
@@ -547,7 +548,32 @@
            field_units[new_field] == "":
                field_units.pop(new_field)
     data = new_data
-    return field_units, data
+    # Sanity checking that all fields have the same dimensions.
+    g_shapes = []
+    p_shapes = defaultdict(list)
+    for field in data:
+        f_shape = data[field].shape
+        n_shape = len(f_shape)
+        if n_shape in (1, 2):
+            p_shapes[field[0]].append(f_shape[0])
+        elif n_shape == 3:
+            g_shapes.append(f_shape)
+    if len(g_shapes) > 0 and not np.all(np.array(g_shapes) == g_shapes[0]):
+        raise RuntimeError("Not all grid-based fields have the same shape!")
+    if grid_dims is not None and not np.all(np.array(g_shapes) == grid_dims):
+        raise RuntimeError("Not all grid-based fields match the grid dimensions!")
+    if len(p_shapes) > 0:
+        for ptype, p_shape in p_shapes.items():
+            if not np.all(np.array(p_shape) == p_shape[0]):
+                raise RuntimeError("Not all fields with field type '%s' " % ptype +
+                                   "have the same shape!")
+    # Now that we know the particle fields are consistent, determine the number
+    # of particles.
+    if len(p_shapes) > 0:
+        number_of_particles = np.sum([s[0] for s in p_shapes.values()])
+    else:
+        number_of_particles = 0
+    return field_units, data, number_of_particles
 
 
 def load_uniform_grid(data, domain_dimensions, length_unit=None, bbox=None,
@@ -567,8 +593,7 @@
       disappointing or non-existent in most cases.
     * Particles may be difficult to integrate.
 
-    Particle fields are detected as one-dimensional fields. The number of
-    particles is set by the "number_of_particles" key in data.
+    Particle fields are detected as one-dimensional fields.
 
     Parameters
     ----------
@@ -637,18 +662,12 @@
     domain_left_edge = np.array(bbox[:, 0], 'float64')
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
-    number_of_particles = data.pop("number_of_particles", 0)
-    # First we fix our field names
-    field_units, data = unitify_data(data)
-
-    dshape = tuple(domain_dimensions)
-    for field_name in data:
-        fshape = data[field_name].shape
-        if len(fshape) == 3 and fshape != dshape:
-            msg = ("Input data shape %s for field %s does not match provided "
-                   "domain_dimensions %s!")
-            msg = msg % (fshape, field_name, dshape)
-            raise RuntimeError(msg)
+    # If someone included this throw it away--old API
+    data.pop("number_of_particles", 0)
+    # First we fix our field names, apply units to data
+    # and check for consistency of field shapes
+    field_units, data, number_of_particles = process_data(data,
+                                                          grid_dims=tuple(domain_dimensions))
 
     sfh = StreamDictFieldHandler()
 
@@ -838,9 +857,11 @@
         grid_right_edges[i,:] = g.pop("right_edge")
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
-        if "number_of_particles" in g:
-            number_of_particles[i,:] = g.pop("number_of_particles")
-        field_units, data = unitify_data(g)
+        # If someone included this throw it away--old API
+        g.pop("number_of_particles", None)
+        field_units, data, n_particles = process_data(g,
+                                                      grid_dims=tuple(grid_dimensions[i,:]))
+        number_of_particles[i, :] = n_particles
         sfh[i] = data
 
     # We now reconstruct our parent ids, so that our particle assignment can
@@ -1113,7 +1134,7 @@
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
-    field_units, data = unitify_data(data)
+    field_units, data, _ = process_data(data)
     sfh = StreamDictFieldHandler()
 
     pdata = {}
@@ -1338,7 +1359,7 @@
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
-    field_units, data = unitify_data(data)
+    field_units, data, _ = process_data(data)
     sfh = StreamDictFieldHandler()
 
     particle_types = set_particle_types(data)
@@ -1585,7 +1606,7 @@
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
-    field_units, data = unitify_data(data)
+    field_units, data, _ = process_data(data)
     sfh = StreamDictFieldHandler()
 
     particle_types = set_particle_types(data)
@@ -1832,7 +1853,7 @@
     sfh.update({'connectivity': connectivity,
                 'coordinates': coordinates})
     for i, d in enumerate(data):
-        _f_unit, _data = unitify_data(d)
+        _f_unit, _data, _ = process_data(d)
         field_units.update(_f_unit)
         sfh[i] = _data
         particle_types.update(set_particle_types(d))


https://bitbucket.org/yt_analysis/yt/commits/322ba9801d8b/
Changeset:   322ba9801d8b
User:        jzuhone
Date:        2017-05-26 18:56:06+00:00
Summary:     Don’t add "all" fields to new datasets
Affected #:  1 file

diff -r 6df9e1ba6ccbabfbf032837d1a2957bab2f7ea6e -r 322ba9801d8b3514567ab7efc4e098e5ffbb166f yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -109,7 +109,8 @@
                     number_of_particles=grid.NumberOfParticles)
 
         for field in amr1.field_list:
-            data[field] = grid[field]
+            if field[0] != "all":
+                data[field] = grid[field]
 
         grid_data.append(data)
 
@@ -124,14 +125,14 @@
     assert_equal(number_of_particles1, number_of_particles2)
 
     for grid in amr1.index.grids:
-        tot_parts = grid["io","particle_position_x"].size
-        tot_all_parts = grid["all","particle_position_x"].size
+        tot_parts = grid["io", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 
     for grid in amr2.index.grids:
-        tot_parts = grid["io","particle_position_x"].size
-        tot_all_parts = grid["all","particle_position_x"].size
+        tot_parts = grid["io", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 
@@ -188,9 +189,9 @@
     assert_equal(number_of_particles3, number_of_particles4)
 
     for grid in ug4.index.grids:
-        tot_parts = grid["dm","particle_position_x"].size
-        tot_parts += grid["star","particle_position_x"].size
-        tot_all_parts = grid["all","particle_position_x"].size
+        tot_parts = grid["dm", "particle_position_x"].size
+        tot_parts += grid["star", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 
@@ -223,7 +224,8 @@
                     number_of_particles=grid.NumberOfParticles)
 
         for field in amr3.field_list:
-            data[field] = grid[field]
+            if field[0] != "all":
+                data[field] = grid[field]
 
         grid_data.append(data)
 
@@ -248,9 +250,9 @@
         assert amr4._get_field_info(ptype, "particle_mass").particle_type
 
     for grid in amr3.index.grids:
-        tot_parts = grid["dm","particle_position_x"].size
-        tot_parts += grid["star","particle_position_x"].size
-        tot_all_parts = grid["all","particle_position_x"].size
+        tot_parts = grid["dm", "particle_position_x"].size
+        tot_parts += grid["star", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 


https://bitbucket.org/yt_analysis/yt/commits/82cab373f879/
Changeset:   82cab373f879
User:        jzuhone
Date:        2017-05-26 19:01:40+00:00
Summary:     Make sure we only do this second check if g_shapes > 0
Affected #:  1 file

diff -r 322ba9801d8b3514567ab7efc4e098e5ffbb166f -r 82cab373f8798c439bc98471ce215c95c28cebc0 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -560,8 +560,9 @@
             g_shapes.append(f_shape)
     if len(g_shapes) > 0 and not np.all(np.array(g_shapes) == g_shapes[0]):
         raise RuntimeError("Not all grid-based fields have the same shape!")
-    if grid_dims is not None and not np.all(np.array(g_shapes) == grid_dims):
-        raise RuntimeError("Not all grid-based fields match the grid dimensions!")
+    if len(g_shapes) > 0 and grid_dims is not None: 
+        if not np.all(np.array(g_shapes) == grid_dims):
+            raise RuntimeError("Not all grid-based fields match the grid dimensions!")
     if len(p_shapes) > 0:
         for ptype, p_shape in p_shapes.items():
             if not np.all(np.array(p_shape) == p_shape[0]):


https://bitbucket.org/yt_analysis/yt/commits/ce622125237c/
Changeset:   ce622125237c
User:        jzuhone
Date:        2017-05-26 19:05:33+00:00
Summary:     Make particle_generator consistent with this
Affected #:  2 files

diff -r 82cab373f8798c439bc98471ce215c95c28cebc0 -r ce622125237c82fbbc721a42823a108a9249e872 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -269,10 +269,7 @@
         """
         particle_types = set_particle_types(data[0])
 
-        for key in data[0].keys():
-            if key == "number_of_particles":
-                continue
-            self.stream_handler.particle_types[key] = particle_types[key]
+        self.stream_handler.particle_types.update(particle_types)
         self.ds._find_particle_types()
 
         for i, grid in enumerate(self.grids):

diff -r 82cab373f8798c439bc98471ce215c95c28cebc0 -r ce622125237c82fbbc721a42823a108a9249e872 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -176,14 +176,12 @@
         grid_data = []
         for i, g in enumerate(self.ds.index.grids):
             data = {}
-            if clobber:
-                data["number_of_particles"] = self.NumberOfParticles[i]
-            else:
-                data["number_of_particles"] = self.NumberOfParticles[i] + \
-                                              g.NumberOfParticles
+            number_of_particles = self.NumberOfParticles[i]
+            if not clobber:
+                number_of_particles += g.NumberOfParticles
             grid_particles = self.get_for_grid(g)
             for field in self.field_list:
-                if data["number_of_particles"] > 0:
+                if number_of_particles > 0:
                     if g.NumberOfParticles > 0 and not clobber and \
                         field in self.ds.field_list:
                         # We have particles in this grid, we're not


https://bitbucket.org/yt_analysis/yt/commits/39f4c9766bcc/
Changeset:   39f4c9766bcc
User:        jzuhone
Date:        2017-05-26 19:22:44+00:00
Summary:     Remove number_of_particles from docs
Affected #:  2 files

diff -r ce622125237c82fbbc721a42823a108a9249e872 -r 39f4c9766bcca7c3030d37003777dfaec5c5213d doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -139,9 +139,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Particle fields are detected as one-dimensional fields. The number of\n",
-    "particles is set by the `number_of_particles` key in\n",
-    "`data`. Particle fields are then added as one-dimensional arrays in\n",
+    "Particle fields are detected as one-dimensional fields. Particle fields are then added as one-dimensional arrays in\n",
     "a similar manner as the three-dimensional grid fields:"
    ]
   },
@@ -157,7 +155,6 @@
     "posy_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
     "posz_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
     "data = dict(density = (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n",
-    "            number_of_particles = 10000,\n",
     "            particle_position_x = (posx_arr, 'code_length'), \n",
     "            particle_position_y = (posy_arr, 'code_length'),\n",
     "            particle_position_z = (posz_arr, 'code_length'))\n",
@@ -170,8 +167,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "In this example only the particle position fields have been assigned. `number_of_particles` must be the same size as the particle\n",
-    "arrays. If no particle arrays are supplied then `number_of_particles` is assumed to be zero. Take a slice, and overlay particle positions:"
+    "In this example only the particle position fields have been assigned. If no particle arrays are supplied, then the number of particles is assumed to be zero. Take a slice, and overlay particle positions:"
    ]
   },
   {
@@ -574,8 +570,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Particle fields are supported by adding 1-dimensional arrays to each `grid` and\n",
-    "setting the `number_of_particles` key in each `grid`'s dict. If a grid has no particles, set `number_of_particles = 0`, but the particle fields still have to be defined since they are defined elsewhere; set them to empty NumPy arrays:"
+    "Particle fields are supported by adding 1-dimensional arrays to each `grid`. If a grid has no particles, the particle fields still have to be defined since they are defined elsewhere; set them to empty NumPy arrays:"
    ]
   },
   {
@@ -586,11 +581,9 @@
    },
    "outputs": [],
    "source": [
-    "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
     "grid_data[0][\"particle_position_x\"] = (np.array([]), \"code_length\") # No particles, so set empty arrays\n",
     "grid_data[0][\"particle_position_y\"] = (np.array([]), \"code_length\")\n",
     "grid_data[0][\"particle_position_z\"] = (np.array([]), \"code_length\")\n",
-    "grid_data[1][\"number_of_particles\"] = 1000\n",
     "grid_data[1][\"particle_position_x\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")\n",
     "grid_data[1][\"particle_position_y\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")\n",
     "grid_data[1][\"particle_position_z\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")"
@@ -645,7 +638,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "For both uniform grid data and AMR data, one can specify particle fields with multiple types if the particle field names are given as field tuples instead of strings (the default particle type is `\"io\"`). Make sure that the total number of particles given in the `number_of_particles` key is set to the sum of the number of particles for all types:"
+    "For both uniform grid data and AMR data, one can specify particle fields with multiple types if the particle field names are given as field tuples instead of strings (the default particle type is `\"io\"`):"
    ]
   },
   {
@@ -661,7 +654,6 @@
     "posyb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
     "poszb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
     "data = {\"density\": (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n",
-    "        \"number_of_particles\": 30000,\n",
     "        (\"red\", \"particle_position_x\"): (posxr_arr, 'code_length'), \n",
     "        (\"red\", \"particle_position_y\"): (posyr_arr, 'code_length'),\n",
     "        (\"red\", \"particle_position_z\"): (poszr_arr, 'code_length'),\n",

diff -r ce622125237c82fbbc721a42823a108a9249e872 -r 39f4c9766bcca7c3030d37003777dfaec5c5213d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1253,13 +1253,11 @@
        dict(left_edge=[0.0, 0.0, 0.0],
             right_edge=[1.0, 1.0, 1.0],
             level=0,
-            dimensions=[32, 32, 32],
-            number_of_particles=0)
+            dimensions=[32, 32, 32])
        dict(left_edge=[0.25, 0.25, 0.25],
             right_edge=[0.75, 0.75, 0.75],
             level=1,
-            dimensions=[32, 32, 32],
-            number_of_particles=0)
+            dimensions=[32, 32, 32])
    ]
 
    for g in grid_data:
@@ -1272,14 +1270,13 @@
    yt only supports a block structure where the grid edges on the ``n``-th
    refinement level are aligned with the cell edges on the ``n-1``-th level.
 
-Particle fields are supported by adding 1-dimensional arrays and
-setting the ``number_of_particles`` key to each ``grid``'s dict:
+Particle fields are supported by adding 1-dimensional arrays to each 
+``grid``'s dict:
 
 .. code-block:: python
 
    for g in grid_data:
-       g["number_of_particles"] = 100000
-       g["particle_position_x"] = np.random.random((g["number_of_particles"]))
+       g["particle_position_x"] = np.random.random(size=100000)
 
 .. rubric:: Caveats
 
@@ -1318,26 +1315,22 @@
 simultaneously divide the domain into 12 chunks, so that you can take advantage
 of the underlying parallelism.
 
-Particle fields are detected as one-dimensional fields. The number of
-particles is set by the ``number_of_particles`` key in
-``data``. Particle fields are then added as one-dimensional arrays in
-a similar manner as the three-dimensional grid fields:
+Particle fields are added as one-dimensional arrays in a similar manner as the 
+three-dimensional grid fields:
 
 .. code-block:: python
 
    import yt
 
    data = dict(Density = dens,
-               number_of_particles = 1000000,
                particle_position_x = posx_arr,
-	       particle_position_y = posy_arr,
-	       particle_position_z = posz_arr)
+	           particle_position_y = posy_arr,
+	           particle_position_z = posz_arr)
    bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
    ds = yt.load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
 
-where in this example the particle position fields have been assigned.
-``number_of_particles`` must be the same size as the particle arrays. If no
-particle arrays are supplied then ``number_of_particles`` is assumed to be
+where in this example the particle position fields have been assigned. If no
+particle fields are supplied, then the number of particles is assumed to be
 zero.
 
 .. rubric:: Caveats


https://bitbucket.org/yt_analysis/yt/commits/1398c1014d69/
Changeset:   1398c1014d69
User:        jzuhone
Date:        2017-05-26 19:24:55+00:00
Summary:     Remove number_of_particles from test
Affected #:  1 file

diff -r 39f4c9766bcca7c3030d37003777dfaec5c5213d -r 1398c1014d690f1911916b1af7e6f108ac429ce3 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -39,8 +39,7 @@
         data = dict(left_edge=grid.LeftEdge,
                     right_edge=grid.RightEdge,
                     level=grid.Level,
-                    dimensions=grid.ActiveDimensions,
-                    number_of_particles=grid.NumberOfParticles)
+                    dimensions=grid.ActiveDimensions)
 
         for field in amr0.field_list:
             data[field] = grid[field]
@@ -54,8 +53,7 @@
                "particle_position_x": x,
                "particle_position_y": y,
                "particle_position_z": z,
-               "particle_mass": m,
-               "number_of_particles": num_particles}
+               "particle_mass": m}
 
     fields2 = fields1.copy()
 
@@ -105,8 +103,7 @@
         data = dict(left_edge=grid.LeftEdge,
                     right_edge=grid.RightEdge,
                     level=grid.Level,
-                    dimensions=grid.ActiveDimensions,
-                    number_of_particles=grid.NumberOfParticles)
+                    dimensions=grid.ActiveDimensions)
 
         for field in amr1.field_list:
             if field[0] != "all":
@@ -172,8 +169,7 @@
                ("star", "particle_position_x"): xs,
                ("star", "particle_position_y"): ys,
                ("star", "particle_position_z"): zs,
-               ("star", "particle_mass"): ms,
-               "number_of_particles": num_dm_particles+num_star_particles}
+               ("star", "particle_mass"): ms}
 
     fields4 = fields3.copy()
 
@@ -220,8 +216,7 @@
         data = dict(left_edge=grid.LeftEdge,
                     right_edge=grid.RightEdge,
                     level=grid.Level,
-                    dimensions=grid.ActiveDimensions,
-                    number_of_particles=grid.NumberOfParticles)
+                    dimensions=grid.ActiveDimensions)
 
         for field in amr3.field_list:
             if field[0] != "all":


https://bitbucket.org/yt_analysis/yt/commits/a1fd92c77c35/
Changeset:   a1fd92c77c35
User:        jzuhone
Date:        2017-05-26 21:55:51+00:00
Summary:     Adding specific exceptions for these occurrences
Affected #:  2 files

diff -r 1398c1014d690f1911916b1af7e6f108ac429ce3 -r a1fd92c77c350b08ad6990905e1f62ac87981a8e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -63,7 +63,9 @@
 from yt.utilities.decompose import \
     decompose_array, get_psize
 from yt.utilities.exceptions import \
-    YTIllDefinedAMR
+    YTIllDefinedAMR, \
+    YTInconsistentGridFieldShape, \
+    YTInconsistentParticleFieldShape
 from yt.units.yt_array import \
     YTQuantity, \
     uconcatenate
@@ -556,15 +558,14 @@
         elif n_shape == 3:
             g_shapes.append(f_shape)
     if len(g_shapes) > 0 and not np.all(np.array(g_shapes) == g_shapes[0]):
-        raise RuntimeError("Not all grid-based fields have the same shape!")
-    if len(g_shapes) > 0 and grid_dims is not None: 
+        raise YTInconsistentGridFieldShape(False)
+    if len(g_shapes) > 0 and grid_dims is not None:
         if not np.all(np.array(g_shapes) == grid_dims):
-            raise RuntimeError("Not all grid-based fields match the grid dimensions!")
+            raise YTInconsistentGridFieldShape(True)
     if len(p_shapes) > 0:
         for ptype, p_shape in p_shapes.items():
             if not np.all(np.array(p_shape) == p_shape[0]):
-                raise RuntimeError("Not all fields with field type '%s' " % ptype +
-                                   "have the same shape!")
+                raise YTInconsistentParticleFieldShape(ptype)
     # Now that we know the particle fields are consistent, determine the number
     # of particles.
     if len(p_shapes) > 0:

diff -r 1398c1014d690f1911916b1af7e6f108ac429ce3 -r a1fd92c77c350b08ad6990905e1f62ac87981a8e yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -661,3 +661,24 @@
             "on the parent level ({} axis)"
         ).format(self.level, self.axis)
         return msg
+
+class YTInconsistentGridFieldShape(YTException):
+    def __init__(self, grid_dims_check):
+        self.grid_dims_check = grid_dims_check
+
+    def __str__(self):
+        if self.grid_dims_check:
+            msg = "Not all grid-based fields match the grid dimensions!"
+        else:
+            msg = "Not all grid-based fields have the same shape!"
+        return msg
+
+class YTInconsistentParticleFieldShape(YTException):
+    def __init__(self, ptype):
+        self.ptype = ptype
+
+    def __str__(self):
+        msg = (
+            "Not all fields with field type {} have the same shape!"
+        ).format(self.ptype)
+        return msg
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/c25c2080c28a/
Changeset:   c25c2080c28a
User:        jzuhone
Date:        2017-05-26 21:56:02+00:00
Summary:     Adding a test for the exceptions
Affected #:  1 file

diff -r a1fd92c77c350b08ad6990905e1f62ac87981a8e -r c25c2080c28aa473b2f4e4daf8073cb0e931d44e yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -19,12 +19,15 @@
 import tempfile
 import unittest
 
-from yt.frontends.stream.data_structures import load_uniform_grid
+from yt.frontends.stream.data_structures import load_uniform_grid, \
+    load_particles
 from yt.testing import \
     assert_equal, \
     assert_raises
 from yt.convenience import load
-from yt.utilities.exceptions import YTOutputNotIdentified
+from yt.utilities.exceptions import YTOutputNotIdentified, \
+    YTInconsistentGridFieldShape, \
+    YTInconsistentParticleFieldShape
 
 class TestEmptyLoad(unittest.TestCase):
 
@@ -59,3 +62,35 @@
     dd = ds.all_data()
 
     assert_equal(Z.max(), dd["metallicity"].max())
+
+def test_inconsistent_field_shape():
+
+    def load_field_field_mismatch():
+        d = np.random.uniform(size=(32, 32, 32))
+        t = np.random.uniform(size=(32, 64, 32))
+        data = {"density": d, "temperature": t}
+        load_uniform_grid(data, (32,32,32))
+
+    assert_raises(YTInconsistentGridFieldShape,
+                  load_field_field_mismatch)
+
+    def load_field_grid_mismatch():
+        d = np.random.uniform(size=(32, 32, 32))
+        t = np.random.uniform(size=(32, 32, 32))
+        data = {"density": d, "temperature": t}
+        load_uniform_grid(data, (32,64,32))
+
+    assert_raises(YTInconsistentGridFieldShape,
+                  load_field_grid_mismatch)
+
+    def load_particle_fields_mismatch():
+        x = np.random.uniform(size=100)
+        y = np.random.uniform(size=100)
+        z = np.random.uniform(size=200)
+        data = {"particle_position_x": x,
+                "particle_position_y": y,
+                "particle_position_z": z}
+        load_particles(data)
+
+    assert_raises(YTInconsistentParticleFieldShape,
+                  load_particle_fields_mismatch)
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/1bfb74b61d17/
Changeset:   1bfb74b61d17
User:        jzuhone
Date:        2017-05-27 01:27:39+00:00
Summary:     Previous hacking with recreating the field_info is no longer necessary
Affected #:  1 file

diff -r c25c2080c28aa473b2f4e4daf8073cb0e931d44e -r 1bfb74b61d17bb2169c8e7b626930e8e9c9e1f8e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1022,21 +1022,15 @@
 
         ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox)
 
-        cur_gc = ds.index.num_grids
-
-    ds.particle_types_raw = base_ds.particle_types_raw
-    ds.particle_types = ds.particle_types_raw
+        ds.particle_types_raw = base_ds.particle_types_raw
+        ds.particle_types = ds.particle_types_raw
 
-    # Now figure out where the particles go
-    if number_of_particles > 0:
-        # This will update the stream handler too
-        assign_particle_data(ds, pdata)
-        # Because we've already used the index, we
-        # have to re-create the field info because
-        # we added particle data after the fact
-        ds.index._reset_particle_count()
-        ds.index._detect_output_fields()
-        ds.create_field_info()
+        # Now figure out where the particles go
+        if number_of_particles > 0:
+            # This will update the stream handler too
+            assign_particle_data(ds, pdata)
+
+        cur_gc = ds.index.num_grids
 
     return ds
 


https://bitbucket.org/yt_analysis/yt/commits/e5406dadf584/
Changeset:   e5406dadf584
User:        jzuhone
Date:        2017-06-02 18:19:41+00:00
Summary:     Fix long lines
Affected #:  1 file

diff -r 1bfb74b61d17bb2169c8e7b626930e8e9c9e1f8e -r e5406dadf584cc01e56e56e6aeafabb82f204082 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -665,8 +665,8 @@
     data.pop("number_of_particles", 0)
     # First we fix our field names, apply units to data
     # and check for consistency of field shapes
-    field_units, data, number_of_particles = process_data(data,
-                                                          grid_dims=tuple(domain_dimensions))
+    field_units, data, number_of_particles = process_data(
+        data, grid_dims=tuple(domain_dimensions))
 
     sfh = StreamDictFieldHandler()
 
@@ -858,8 +858,8 @@
         grid_levels[i,:] = g.pop("level")
         # If someone included this throw it away--old API
         g.pop("number_of_particles", None)
-        field_units, data, n_particles = process_data(g,
-                                                      grid_dims=tuple(grid_dimensions[i,:]))
+        field_units, data, n_particles = process_data(
+            g, grid_dims=tuple(grid_dimensions[i,:]))
         number_of_particles[i, :] = n_particles
         sfh[i] = data
 


https://bitbucket.org/yt_analysis/yt/commits/c385e6928bed/
Changeset:   c385e6928bed
User:        jzuhone
Date:        2017-06-02 18:25:23+00:00
Summary:     Add deprecation warning for this
Affected #:  1 file

diff -r e5406dadf584cc01e56e56e6aeafabb82f204082 -r c385e6928beda5c3986d9ad5408e10ce1677690b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -28,7 +28,8 @@
 
 from yt.funcs import \
     iterable, \
-    ensure_list
+    ensure_list, \
+    issue_deprecation_warning
 from yt.utilities.io_handler import io_registry
 from yt.data_objects.field_data import \
     YTFieldData
@@ -662,7 +663,13 @@
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
     # If someone included this throw it away--old API
-    data.pop("number_of_particles", 0)
+    if "number_of_particles" in data:
+        issue_deprecation_warning("It is no longer necessary to include "
+                                  "the number of particles in the data "
+                                  "dict. The number of particles is "
+                                  "determined from the sizes of the "
+                                  "particle fields.")
+        data.pop("number_of_particles")
     # First we fix our field names, apply units to data
     # and check for consistency of field shapes
     field_units, data, number_of_particles = process_data(
@@ -857,7 +864,13 @@
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
         # If someone included this throw it away--old API
-        g.pop("number_of_particles", None)
+        if "number_of_particles" in g:
+            issue_deprecation_warning("It is no longer necessary to include "
+                                      "the number of particles in the data "
+                                      "dict. The number of particles is "
+                                      "determined from the sizes of the "
+                                      "particle fields.")
+            g.pop("number_of_particles")
         field_units, data, n_particles = process_data(
             g, grid_dims=tuple(grid_dimensions[i,:]))
         number_of_particles[i, :] = n_particles


https://bitbucket.org/yt_analysis/yt/commits/55d7255b3024/
Changeset:   55d7255b3024
User:        jzuhone
Date:        2017-06-02 18:59:14+00:00
Summary:     Improve exception-raising to give more information to the user
Affected #:  3 files

diff -r c385e6928beda5c3986d9ad5408e10ce1677690b -r 55d7255b3024c829a6addf0a90037143dbf6bf5a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -66,7 +66,8 @@
 from yt.utilities.exceptions import \
     YTIllDefinedAMR, \
     YTInconsistentGridFieldShape, \
-    YTInconsistentParticleFieldShape
+    YTInconsistentParticleFieldShape, \
+    YTInconsistentGridFieldShapeGridDims
 from yt.units.yt_array import \
     YTQuantity, \
     uconcatenate
@@ -555,18 +556,20 @@
         f_shape = data[field].shape
         n_shape = len(f_shape)
         if n_shape in (1, 2):
-            p_shapes[field[0]].append(f_shape[0])
+            p_shapes[field[0]].append((field[1], f_shape[0]))
         elif n_shape == 3:
-            g_shapes.append(f_shape)
-    if len(g_shapes) > 0 and not np.all(np.array(g_shapes) == g_shapes[0]):
-        raise YTInconsistentGridFieldShape(False)
-    if len(g_shapes) > 0 and grid_dims is not None:
-        if not np.all(np.array(g_shapes) == grid_dims):
-            raise YTInconsistentGridFieldShape(True)
+            g_shapes.append((field, f_shape))
+    if len(g_shapes) > 0:
+        g_s = np.array([s[1] for s in g_shapes])
+        if not np.all(g_s == g_s[0]):
+            raise YTInconsistentGridFieldShape(g_shapes)
+        if grid_dims is not None:
+            if not np.all(g_s == grid_dims):
+                raise YTInconsistentGridFieldShapeGridDims(g_shapes, grid_dims)
     if len(p_shapes) > 0:
         for ptype, p_shape in p_shapes.items():
             if not np.all(np.array(p_shape) == p_shape[0]):
-                raise YTInconsistentParticleFieldShape(ptype)
+                raise YTInconsistentParticleFieldShape(ptype, p_shape)
     # Now that we know the particle fields are consistent, determine the number
     # of particles.
     if len(p_shapes) > 0:

diff -r c385e6928beda5c3986d9ad5408e10ce1677690b -r 55d7255b3024c829a6addf0a90037143dbf6bf5a yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -25,9 +25,11 @@
     assert_equal, \
     assert_raises
 from yt.convenience import load
-from yt.utilities.exceptions import YTOutputNotIdentified, \
+from yt.utilities.exceptions import \
+    YTOutputNotIdentified, \
     YTInconsistentGridFieldShape, \
-    YTInconsistentParticleFieldShape
+    YTInconsistentParticleFieldShape, \
+    YTInconsistentGridFieldShapeGridDims
 
 class TestEmptyLoad(unittest.TestCase):
 
@@ -80,7 +82,7 @@
         data = {"density": d, "temperature": t}
         load_uniform_grid(data, (32,64,32))
 
-    assert_raises(YTInconsistentGridFieldShape,
+    assert_raises(YTInconsistentGridFieldShapeGridDims,
                   load_field_grid_mismatch)
 
     def load_particle_fields_mismatch():

diff -r c385e6928beda5c3986d9ad5408e10ce1677690b -r 55d7255b3024c829a6addf0a90037143dbf6bf5a yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -663,22 +663,39 @@
         return msg
 
 class YTInconsistentGridFieldShape(YTException):
-    def __init__(self, grid_dims_check):
-        self.grid_dims_check = grid_dims_check
+    def __init__(self, shapes):
+        self.shapes = shapes
 
     def __str__(self):
-        if self.grid_dims_check:
-            msg = "Not all grid-based fields match the grid dimensions!"
-        else:
-            msg = "Not all grid-based fields have the same shape!"
+        msg = "Not all grid-based fields have the same shape!\n"
+        for name, shape in self.shapes:
+            msg += "    Field {} has shape {}.\n".format(name, shape)
         return msg
 
 class YTInconsistentParticleFieldShape(YTException):
-    def __init__(self, ptype):
+    def __init__(self, ptype, shapes):
         self.ptype = ptype
+        self.shapes = shapes
 
     def __str__(self):
         msg = (
-            "Not all fields with field type {} have the same shape!"
+            "Not all fields with field type '{}' have the same shape!\n"
         ).format(self.ptype)
-        return msg
\ No newline at end of file
+        for name, shape in self.shapes:
+            field = (self.ptype, name)
+            msg += "    Field {} has shape {}.\n".format(field, shape)
+        return msg
+
+class YTInconsistentGridFieldShapeGridDims(YTException):
+    def __init__(self, shapes, grid_dims):
+        self.shapes = shapes
+        self.grid_dims = grid_dims
+
+    def __str__(self):
+        msg = "Not all grid-based fields match the grid dimensions! "
+        msg += "Grid dims are {}, ".format(self.grid_dims)
+        msg += "and the following fields have shapes that do not match them:\n"
+        for name, shape in self.shapes:
+            if shape != self.grid_dims:
+                msg += "    Field {} has shape {}.\n".format(name, shape)
+        return msg


https://bitbucket.org/yt_analysis/yt/commits/a31d00a88493/
Changeset:   a31d00a88493
User:        jzuhone
Date:        2017-06-02 20:59:45+00:00
Summary:     Bugfixes
Affected #:  1 file

diff -r 55d7255b3024c829a6addf0a90037143dbf6bf5a -r a31d00a884936bb84830ef7aee1ecdb1602639ae yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -568,12 +568,13 @@
                 raise YTInconsistentGridFieldShapeGridDims(g_shapes, grid_dims)
     if len(p_shapes) > 0:
         for ptype, p_shape in p_shapes.items():
-            if not np.all(np.array(p_shape) == p_shape[0]):
+            p_s = np.array([s[1] for s in p_shape])
+            if not np.all(p_s == p_s[0]):
                 raise YTInconsistentParticleFieldShape(ptype, p_shape)
     # Now that we know the particle fields are consistent, determine the number
     # of particles.
     if len(p_shapes) > 0:
-        number_of_particles = np.sum([s[0] for s in p_shapes.values()])
+        number_of_particles = np.sum([s[0][1] for s in p_shapes.values()])
     else:
         number_of_particles = 0
     return field_units, data, number_of_particles


https://bitbucket.org/yt_analysis/yt/commits/662fdbe5331a/
Changeset:   662fdbe5331a
User:        ngoldbaum
Date:        2017-06-03 17:09:27+00:00
Summary:     Merge pull request #1428 from jzuhone/stream_refactor

Stream frontend: add sanity checks for field shapes and remove number_of_particles requirement
Affected #:  7 files

diff -r 026d5035a5464d1d8b669afd57c3c9f646a36d9d -r 662fdbe5331ad344f09868c8115f697e5a3bae62 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -139,9 +139,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Particle fields are detected as one-dimensional fields. The number of\n",
-    "particles is set by the `number_of_particles` key in\n",
-    "`data`. Particle fields are then added as one-dimensional arrays in\n",
+    "Particle fields are detected as one-dimensional fields. Particle fields are then added as one-dimensional arrays in\n",
     "a similar manner as the three-dimensional grid fields:"
    ]
   },
@@ -157,7 +155,6 @@
     "posy_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
     "posz_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
     "data = dict(density = (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n",
-    "            number_of_particles = 10000,\n",
     "            particle_position_x = (posx_arr, 'code_length'), \n",
     "            particle_position_y = (posy_arr, 'code_length'),\n",
     "            particle_position_z = (posz_arr, 'code_length'))\n",
@@ -170,8 +167,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "In this example only the particle position fields have been assigned. `number_of_particles` must be the same size as the particle\n",
-    "arrays. If no particle arrays are supplied then `number_of_particles` is assumed to be zero. Take a slice, and overlay particle positions:"
+    "In this example only the particle position fields have been assigned. If no particle arrays are supplied, then the number of particles is assumed to be zero. Take a slice, and overlay particle positions:"
    ]
   },
   {
@@ -574,8 +570,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Particle fields are supported by adding 1-dimensional arrays to each `grid` and\n",
-    "setting the `number_of_particles` key in each `grid`'s dict. If a grid has no particles, set `number_of_particles = 0`, but the particle fields still have to be defined since they are defined elsewhere; set them to empty NumPy arrays:"
+    "Particle fields are supported by adding 1-dimensional arrays to each `grid`. If a grid has no particles, the particle fields still have to be defined since they are defined elsewhere; set them to empty NumPy arrays:"
    ]
   },
   {
@@ -586,11 +581,9 @@
    },
    "outputs": [],
    "source": [
-    "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
     "grid_data[0][\"particle_position_x\"] = (np.array([]), \"code_length\") # No particles, so set empty arrays\n",
     "grid_data[0][\"particle_position_y\"] = (np.array([]), \"code_length\")\n",
     "grid_data[0][\"particle_position_z\"] = (np.array([]), \"code_length\")\n",
-    "grid_data[1][\"number_of_particles\"] = 1000\n",
     "grid_data[1][\"particle_position_x\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")\n",
     "grid_data[1][\"particle_position_y\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")\n",
     "grid_data[1][\"particle_position_z\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")"
@@ -645,7 +638,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "For both uniform grid data and AMR data, one can specify particle fields with multiple types if the particle field names are given as field tuples instead of strings (the default particle type is `\"io\"`). Make sure that the total number of particles given in the `number_of_particles` key is set to the sum of the number of particles for all types:"
+    "For both uniform grid data and AMR data, one can specify particle fields with multiple types if the particle field names are given as field tuples instead of strings (the default particle type is `\"io\"`):"
    ]
   },
   {
@@ -661,7 +654,6 @@
     "posyb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
     "poszb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
     "data = {\"density\": (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n",
-    "        \"number_of_particles\": 30000,\n",
     "        (\"red\", \"particle_position_x\"): (posxr_arr, 'code_length'), \n",
     "        (\"red\", \"particle_position_y\"): (posyr_arr, 'code_length'),\n",
     "        (\"red\", \"particle_position_z\"): (poszr_arr, 'code_length'),\n",

diff -r 026d5035a5464d1d8b669afd57c3c9f646a36d9d -r 662fdbe5331ad344f09868c8115f697e5a3bae62 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1253,13 +1253,11 @@
        dict(left_edge=[0.0, 0.0, 0.0],
             right_edge=[1.0, 1.0, 1.0],
             level=0,
-            dimensions=[32, 32, 32],
-            number_of_particles=0)
+            dimensions=[32, 32, 32])
        dict(left_edge=[0.25, 0.25, 0.25],
             right_edge=[0.75, 0.75, 0.75],
             level=1,
-            dimensions=[32, 32, 32],
-            number_of_particles=0)
+            dimensions=[32, 32, 32])
    ]
 
    for g in grid_data:
@@ -1272,14 +1270,13 @@
    yt only supports a block structure where the grid edges on the ``n``-th
    refinement level are aligned with the cell edges on the ``n-1``-th level.
 
-Particle fields are supported by adding 1-dimensional arrays and
-setting the ``number_of_particles`` key to each ``grid``'s dict:
+Particle fields are supported by adding 1-dimensional arrays to each 
+``grid``'s dict:
 
 .. code-block:: python
 
    for g in grid_data:
-       g["number_of_particles"] = 100000
-       g["particle_position_x"] = np.random.random((g["number_of_particles"]))
+       g["particle_position_x"] = np.random.random(size=100000)
 
 .. rubric:: Caveats
 
@@ -1318,26 +1315,22 @@
 simultaneously divide the domain into 12 chunks, so that you can take advantage
 of the underlying parallelism.
 
-Particle fields are detected as one-dimensional fields. The number of
-particles is set by the ``number_of_particles`` key in
-``data``. Particle fields are then added as one-dimensional arrays in
-a similar manner as the three-dimensional grid fields:
+Particle fields are added as one-dimensional arrays in a similar manner as the 
+three-dimensional grid fields:
 
 .. code-block:: python
 
    import yt
 
    data = dict(Density = dens,
-               number_of_particles = 1000000,
                particle_position_x = posx_arr,
-	       particle_position_y = posy_arr,
-	       particle_position_z = posz_arr)
+	           particle_position_y = posy_arr,
+	           particle_position_z = posz_arr)
    bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
    ds = yt.load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
 
-where in this example the particle position fields have been assigned.
-``number_of_particles`` must be the same size as the particle arrays. If no
-particle arrays are supplied then ``number_of_particles`` is assumed to be
+where in this example the particle position fields have been assigned. If no
+particle fields are supplied, then the number of particles is assumed to be
 zero.
 
 .. rubric:: Caveats

diff -r 026d5035a5464d1d8b669afd57c3c9f646a36d9d -r 662fdbe5331ad344f09868c8115f697e5a3bae62 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -22,12 +22,14 @@
     chain, \
     product, \
     repeat
+from collections import defaultdict
 
 from numbers import Number as numeric_type
 
 from yt.funcs import \
     iterable, \
-    ensure_list
+    ensure_list, \
+    issue_deprecation_warning
 from yt.utilities.io_handler import io_registry
 from yt.data_objects.field_data import \
     YTFieldData
@@ -62,7 +64,10 @@
 from yt.utilities.decompose import \
     decompose_array, get_psize
 from yt.utilities.exceptions import \
-    YTIllDefinedAMR
+    YTIllDefinedAMR, \
+    YTInconsistentGridFieldShape, \
+    YTInconsistentParticleFieldShape, \
+    YTInconsistentGridFieldShapeGridDims
 from yt.units.yt_array import \
     YTQuantity, \
     uconcatenate
@@ -268,16 +273,12 @@
         """
         particle_types = set_particle_types(data[0])
 
-        for key in data[0].keys():
-            if key == "number_of_particles":
-                continue
-            self.stream_handler.particle_types[key] = particle_types[key]
+        self.stream_handler.particle_types.update(particle_types)
         self.ds._find_particle_types()
 
         for i, grid in enumerate(self.grids):
-            if "number_of_particles" in data[i]:
-                self.stream_handler.particle_count[i] = data[i].pop("number_of_particles")
-            field_units, gdata = unitify_data(data[i])
+            field_units, gdata, number_of_particles = process_data(data[i])
+            self.stream_handler.particle_count[i] = number_of_particles
             self.stream_handler.field_units.update(field_units)
             for field in gdata:
                 if field in grid.field_data:
@@ -485,7 +486,7 @@
         npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0)
         ds.stream_handler.particle_count[gi] = npart
 
-def unitify_data(data):
+def process_data(data, grid_dims=None):
     new_data, field_units = {}, {}
     for field, val in data.items():
         # val is a data array
@@ -529,11 +530,12 @@
     # At this point, we have arrays for all our fields
     new_data = {}
     for field in data:
+        n_shape = len(data[field].shape)
         if isinstance(field, tuple):
             new_field = field
-        elif len(data[field].shape) in (1, 2):
+        elif n_shape in (1, 2):
             new_field = ("io", field)
-        elif len(data[field].shape) == 3:
+        elif n_shape == 3:
             new_field = ("stream", field)
         else:
             raise RuntimeError
@@ -547,7 +549,35 @@
            field_units[new_field] == "":
                field_units.pop(new_field)
     data = new_data
-    return field_units, data
+    # Sanity checking that all fields have the same dimensions.
+    g_shapes = []
+    p_shapes = defaultdict(list)
+    for field in data:
+        f_shape = data[field].shape
+        n_shape = len(f_shape)
+        if n_shape in (1, 2):
+            p_shapes[field[0]].append((field[1], f_shape[0]))
+        elif n_shape == 3:
+            g_shapes.append((field, f_shape))
+    if len(g_shapes) > 0:
+        g_s = np.array([s[1] for s in g_shapes])
+        if not np.all(g_s == g_s[0]):
+            raise YTInconsistentGridFieldShape(g_shapes)
+        if grid_dims is not None:
+            if not np.all(g_s == grid_dims):
+                raise YTInconsistentGridFieldShapeGridDims(g_shapes, grid_dims)
+    if len(p_shapes) > 0:
+        for ptype, p_shape in p_shapes.items():
+            p_s = np.array([s[1] for s in p_shape])
+            if not np.all(p_s == p_s[0]):
+                raise YTInconsistentParticleFieldShape(ptype, p_shape)
+    # Now that we know the particle fields are consistent, determine the number
+    # of particles.
+    if len(p_shapes) > 0:
+        number_of_particles = np.sum([s[0][1] for s in p_shapes.values()])
+    else:
+        number_of_particles = 0
+    return field_units, data, number_of_particles
 
 
 def load_uniform_grid(data, domain_dimensions, length_unit=None, bbox=None,
@@ -567,8 +597,7 @@
       disappointing or non-existent in most cases.
     * Particles may be difficult to integrate.
 
-    Particle fields are detected as one-dimensional fields. The number of
-    particles is set by the "number_of_particles" key in data.
+    Particle fields are detected as one-dimensional fields.
 
     Parameters
     ----------
@@ -637,18 +666,18 @@
     domain_left_edge = np.array(bbox[:, 0], 'float64')
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
-    number_of_particles = data.pop("number_of_particles", 0)
-    # First we fix our field names
-    field_units, data = unitify_data(data)
-
-    dshape = tuple(domain_dimensions)
-    for field_name in data:
-        fshape = data[field_name].shape
-        if len(fshape) == 3 and fshape != dshape:
-            msg = ("Input data shape %s for field %s does not match provided "
-                   "domain_dimensions %s!")
-            msg = msg % (fshape, field_name, dshape)
-            raise RuntimeError(msg)
+    # If someone included this throw it away--old API
+    if "number_of_particles" in data:
+        issue_deprecation_warning("It is no longer necessary to include "
+                                  "the number of particles in the data "
+                                  "dict. The number of particles is "
+                                  "determined from the sizes of the "
+                                  "particle fields.")
+        data.pop("number_of_particles")
+    # First we fix our field names, apply units to data
+    # and check for consistency of field shapes
+    field_units, data, number_of_particles = process_data(
+        data, grid_dims=tuple(domain_dimensions))
 
     sfh = StreamDictFieldHandler()
 
@@ -838,9 +867,17 @@
         grid_right_edges[i,:] = g.pop("right_edge")
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
+        # If someone included this throw it away--old API
         if "number_of_particles" in g:
-            number_of_particles[i,:] = g.pop("number_of_particles")
-        field_units, data = unitify_data(g)
+            issue_deprecation_warning("It is no longer necessary to include "
+                                      "the number of particles in the data "
+                                      "dict. The number of particles is "
+                                      "determined from the sizes of the "
+                                      "particle fields.")
+            g.pop("number_of_particles")
+        field_units, data, n_particles = process_data(
+            g, grid_dims=tuple(grid_dimensions[i,:]))
+        number_of_particles[i, :] = n_particles
         sfh[i] = data
 
     # We now reconstruct our parent ids, so that our particle assignment can
@@ -1002,21 +1039,15 @@
 
         ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox)
 
-        cur_gc = ds.index.num_grids
-
-    ds.particle_types_raw = base_ds.particle_types_raw
-    ds.particle_types = ds.particle_types_raw
+        ds.particle_types_raw = base_ds.particle_types_raw
+        ds.particle_types = ds.particle_types_raw
 
-    # Now figure out where the particles go
-    if number_of_particles > 0:
-        # This will update the stream handler too
-        assign_particle_data(ds, pdata)
-        # Because we've already used the index, we
-        # have to re-create the field info because
-        # we added particle data after the fact
-        ds.index._reset_particle_count()
-        ds.index._detect_output_fields()
-        ds.create_field_info()
+        # Now figure out where the particles go
+        if number_of_particles > 0:
+            # This will update the stream handler too
+            assign_particle_data(ds, pdata)
+
+        cur_gc = ds.index.num_grids
 
     return ds
 
@@ -1113,7 +1144,7 @@
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
-    field_units, data = unitify_data(data)
+    field_units, data, _ = process_data(data)
     sfh = StreamDictFieldHandler()
 
     pdata = {}
@@ -1338,7 +1369,7 @@
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
-    field_units, data = unitify_data(data)
+    field_units, data, _ = process_data(data)
     sfh = StreamDictFieldHandler()
 
     particle_types = set_particle_types(data)
@@ -1585,7 +1616,7 @@
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
-    field_units, data = unitify_data(data)
+    field_units, data, _ = process_data(data)
     sfh = StreamDictFieldHandler()
 
     particle_types = set_particle_types(data)
@@ -1832,7 +1863,7 @@
     sfh.update({'connectivity': connectivity,
                 'coordinates': coordinates})
     for i, d in enumerate(data):
-        _f_unit, _data = unitify_data(d)
+        _f_unit, _data, _ = process_data(d)
         field_units.update(_f_unit)
         sfh[i] = _data
         particle_types.update(set_particle_types(d))

diff -r 026d5035a5464d1d8b669afd57c3c9f646a36d9d -r 662fdbe5331ad344f09868c8115f697e5a3bae62 yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -19,12 +19,17 @@
 import tempfile
 import unittest
 
-from yt.frontends.stream.data_structures import load_uniform_grid
+from yt.frontends.stream.data_structures import load_uniform_grid, \
+    load_particles
 from yt.testing import \
     assert_equal, \
     assert_raises
 from yt.convenience import load
-from yt.utilities.exceptions import YTOutputNotIdentified
+from yt.utilities.exceptions import \
+    YTOutputNotIdentified, \
+    YTInconsistentGridFieldShape, \
+    YTInconsistentParticleFieldShape, \
+    YTInconsistentGridFieldShapeGridDims
 
 class TestEmptyLoad(unittest.TestCase):
 
@@ -59,3 +64,35 @@
     dd = ds.all_data()
 
     assert_equal(Z.max(), dd["metallicity"].max())
+
+def test_inconsistent_field_shape():
+
+    def load_field_field_mismatch():
+        d = np.random.uniform(size=(32, 32, 32))
+        t = np.random.uniform(size=(32, 64, 32))
+        data = {"density": d, "temperature": t}
+        load_uniform_grid(data, (32,32,32))
+
+    assert_raises(YTInconsistentGridFieldShape,
+                  load_field_field_mismatch)
+
+    def load_field_grid_mismatch():
+        d = np.random.uniform(size=(32, 32, 32))
+        t = np.random.uniform(size=(32, 32, 32))
+        data = {"density": d, "temperature": t}
+        load_uniform_grid(data, (32,64,32))
+
+    assert_raises(YTInconsistentGridFieldShapeGridDims,
+                  load_field_grid_mismatch)
+
+    def load_particle_fields_mismatch():
+        x = np.random.uniform(size=100)
+        y = np.random.uniform(size=100)
+        z = np.random.uniform(size=200)
+        data = {"particle_position_x": x,
+                "particle_position_y": y,
+                "particle_position_z": z}
+        load_particles(data)
+
+    assert_raises(YTInconsistentParticleFieldShape,
+                  load_particle_fields_mismatch)
\ No newline at end of file

diff -r 026d5035a5464d1d8b669afd57c3c9f646a36d9d -r 662fdbe5331ad344f09868c8115f697e5a3bae62 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -39,8 +39,7 @@
         data = dict(left_edge=grid.LeftEdge,
                     right_edge=grid.RightEdge,
                     level=grid.Level,
-                    dimensions=grid.ActiveDimensions,
-                    number_of_particles=grid.NumberOfParticles)
+                    dimensions=grid.ActiveDimensions)
 
         for field in amr0.field_list:
             data[field] = grid[field]
@@ -54,8 +53,7 @@
                "particle_position_x": x,
                "particle_position_y": y,
                "particle_position_z": z,
-               "particle_mass": m,
-               "number_of_particles": num_particles}
+               "particle_mass": m}
 
     fields2 = fields1.copy()
 
@@ -105,11 +103,11 @@
         data = dict(left_edge=grid.LeftEdge,
                     right_edge=grid.RightEdge,
                     level=grid.Level,
-                    dimensions=grid.ActiveDimensions,
-                    number_of_particles=grid.NumberOfParticles)
+                    dimensions=grid.ActiveDimensions)
 
         for field in amr1.field_list:
-            data[field] = grid[field]
+            if field[0] != "all":
+                data[field] = grid[field]
 
         grid_data.append(data)
 
@@ -124,14 +122,14 @@
     assert_equal(number_of_particles1, number_of_particles2)
 
     for grid in amr1.index.grids:
-        tot_parts = grid["io","particle_position_x"].size
-        tot_all_parts = grid["all","particle_position_x"].size
+        tot_parts = grid["io", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 
     for grid in amr2.index.grids:
-        tot_parts = grid["io","particle_position_x"].size
-        tot_all_parts = grid["all","particle_position_x"].size
+        tot_parts = grid["io", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 
@@ -171,8 +169,7 @@
                ("star", "particle_position_x"): xs,
                ("star", "particle_position_y"): ys,
                ("star", "particle_position_z"): zs,
-               ("star", "particle_mass"): ms,
-               "number_of_particles": num_dm_particles+num_star_particles}
+               ("star", "particle_mass"): ms}
 
     fields4 = fields3.copy()
 
@@ -188,9 +185,9 @@
     assert_equal(number_of_particles3, number_of_particles4)
 
     for grid in ug4.index.grids:
-        tot_parts = grid["dm","particle_position_x"].size
-        tot_parts += grid["star","particle_position_x"].size
-        tot_all_parts = grid["all","particle_position_x"].size
+        tot_parts = grid["dm", "particle_position_x"].size
+        tot_parts += grid["star", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 
@@ -219,11 +216,11 @@
         data = dict(left_edge=grid.LeftEdge,
                     right_edge=grid.RightEdge,
                     level=grid.Level,
-                    dimensions=grid.ActiveDimensions,
-                    number_of_particles=grid.NumberOfParticles)
+                    dimensions=grid.ActiveDimensions)
 
         for field in amr3.field_list:
-            data[field] = grid[field]
+            if field[0] != "all":
+                data[field] = grid[field]
 
         grid_data.append(data)
 
@@ -248,9 +245,9 @@
         assert amr4._get_field_info(ptype, "particle_mass").particle_type
 
     for grid in amr3.index.grids:
-        tot_parts = grid["dm","particle_position_x"].size
-        tot_parts += grid["star","particle_position_x"].size
-        tot_all_parts = grid["all","particle_position_x"].size
+        tot_parts = grid["dm", "particle_position_x"].size
+        tot_parts += grid["star", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 

diff -r 026d5035a5464d1d8b669afd57c3c9f646a36d9d -r 662fdbe5331ad344f09868c8115f697e5a3bae62 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -661,3 +661,41 @@
             "on the parent level ({} axis)"
         ).format(self.level, self.axis)
         return msg
+
+class YTInconsistentGridFieldShape(YTException):
+    def __init__(self, shapes):
+        self.shapes = shapes
+
+    def __str__(self):
+        msg = "Not all grid-based fields have the same shape!\n"
+        for name, shape in self.shapes:
+            msg += "    Field {} has shape {}.\n".format(name, shape)
+        return msg
+
+class YTInconsistentParticleFieldShape(YTException):
+    def __init__(self, ptype, shapes):
+        self.ptype = ptype
+        self.shapes = shapes
+
+    def __str__(self):
+        msg = (
+            "Not all fields with field type '{}' have the same shape!\n"
+        ).format(self.ptype)
+        for name, shape in self.shapes:
+            field = (self.ptype, name)
+            msg += "    Field {} has shape {}.\n".format(field, shape)
+        return msg
+
+class YTInconsistentGridFieldShapeGridDims(YTException):
+    def __init__(self, shapes, grid_dims):
+        self.shapes = shapes
+        self.grid_dims = grid_dims
+
+    def __str__(self):
+        msg = "Not all grid-based fields match the grid dimensions! "
+        msg += "Grid dims are {}, ".format(self.grid_dims)
+        msg += "and the following fields have shapes that do not match them:\n"
+        for name, shape in self.shapes:
+            if shape != self.grid_dims:
+                msg += "    Field {} has shape {}.\n".format(name, shape)
+        return msg

diff -r 026d5035a5464d1d8b669afd57c3c9f646a36d9d -r 662fdbe5331ad344f09868c8115f697e5a3bae62 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -176,14 +176,12 @@
         grid_data = []
         for i, g in enumerate(self.ds.index.grids):
             data = {}
-            if clobber:
-                data["number_of_particles"] = self.NumberOfParticles[i]
-            else:
-                data["number_of_particles"] = self.NumberOfParticles[i] + \
-                                              g.NumberOfParticles
+            number_of_particles = self.NumberOfParticles[i]
+            if not clobber:
+                number_of_particles += g.NumberOfParticles
             grid_particles = self.get_for_grid(g)
             for field in self.field_list:
-                if data["number_of_particles"] > 0:
+                if number_of_particles > 0:
                     if g.NumberOfParticles > 0 and not clobber and \
                         field in self.ds.field_list:
                         # We have particles in this grid, we're not

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this message because the commit notification service is enabled for the
recipient of this email.


More information about the yt-svn mailing list