[yt-svn] commit/yt: 27 new changesets

commits-noreply at bitbucket.org
Tue Jan 24 08:15:02 PST 2017


27 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/02b4fb8c4804/
Changeset:   02b4fb8c4804
Branch:      yt
User:        atmyers
Date:        2017-01-18 00:58:28+00:00
Summary:     some work towards supporting Boxlib particles.
Affected #:  3 files

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 02b4fb8c4804da750fb2693584448b4d0e05451e yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -16,6 +16,7 @@
 import inspect
 import os
 import re
+from collections import namedtuple
 
 from stat import ST_CTIME
 
@@ -40,8 +41,8 @@
 from .fields import \
     BoxlibFieldInfo, \
     MaestroFieldInfo, \
-    CastroFieldInfo
-
+    CastroFieldInfo, \
+    WarpXFieldInfo
 
 # This is what we use to find scientific notation that might include d's
 # instead of e's.
@@ -690,7 +691,7 @@
 
     def _read_particles(self):
         """
-        reads in particles and assigns them to grids. Will search for
+        Reads in particles and assigns them to grids. Will search for
         Star particles, then sink particles if no star particle file
         is found, and finally will simply note that no particles are
         found if neither works. To add a new Orion particle type,
@@ -778,6 +779,9 @@
         if os.path.exists(jobinfo_filename):
             return False
         # Now we check for all the others
+        warpx_jobinfo_filename = os.path.join(output_dir, "warpx_job_info")
+        if os.path.exists(warpx_jobinfo_filename):
+            return False
         lines = open(inputs_filename).readlines()
         if any(("castro." in line for line in lines)): return False
         if any(("nyx." in line for line in lines)): return False
@@ -1044,3 +1048,139 @@
     if len(vals) == 1:
         vals = vals[0]
     return vals
+
+
+class WarpXParticleHeader(object):
+
+    def __init__(self, header_filename):
+        with open(header_filename, "r") as f:
+            self.version_string = f.readline().strip()
+            self.real_type = self.version_string.split('_')[-1]
+            self.dim = int(f.readline().strip())
+            self.num_int_base = 2 + self.dim
+            self.num_real_base = self.dim
+            self.num_int_extra = 0  # this should be written by Boxlib, but isn't
+            self.num_real_extra = int(f.readline().strip())
+            self.num_int = self.num_int_base + self.num_int_extra
+            self.num_real = self.num_real_base + self.num_real_extra            
+            self.num_particles = int(f.readline().strip())
+            self.max_next_id = int(f.readline().strip())
+            self.finest_level = int(f.readline().strip())
+            self.num_levels = self.finest_level + 1
+
+            self.grids_per_level = np.zeros(self.num_levels, dtype='int64')
+            self.data_map = {}
+            for level_num in range(self.num_levels):
+                self.grids_per_level[level_num] = int(f.readline().strip())
+                self.data_map[level_num] = {}
+            
+            pfd = namedtuple("ParticleFileDescriptor",
+                             ["file_number", "num_particles", "offset"])
+
+            for level_num in range(self.num_levels):
+                for grid_num in range(self.grids_per_level[level_num]):
+                    entry = [int(val) for val in f.readline().strip().split()]
+                    self.data_map[level_num][grid_num] = pfd(*entry)
+
+        self._generate_particle_fields()
+
+    def _generate_particle_fields(self):
+        self.known_int_fields = [("io", "particle_id"),
+                                 ("io", "particle_cpu"),
+                                 ("io", "particle_cell_x"),
+                                 ("io", "particle_cell_y"),
+                                 ("io", "particle_cell_z")]
+        self.known_int_fields = self.known_int_fields[0:2+self.dim]
+
+        extra_int_fields = ["particle_int_comp%d" % i 
+                            for i in range(self.num_int_extra)]
+        self.known_int_fields.extend([("io", field) 
+                                      for field in extra_int_fields])
+
+        self.known_real_fields = [("io", "particle_position_x"),
+                                  ("io", "particle_position_y"),
+                                  ("io", "particle_position_z")]
+        self.known_real_fields = self.known_real_fields[0:self.dim]
+
+        extra_real_fields = ["particle_real_comp%d" % i 
+                             for i in range(self.num_real_extra)]
+        self.known_real_fields.extend([("io", field) 
+                                       for field in extra_real_fields])
+
+        self.known_fields = self.known_int_fields + self.known_real_fields
+
+
+class WarpXHierarchy(BoxlibHierarchy):
+
+    def __init__(self, ds, dataset_type='warpx_native'):
+        BoxlibHierarchy.__init__(self, ds, dataset_type)
+        self._read_particles()
+        
+    def _read_particles(self):
+        particle_header_file = self.ds.output_dir + "/particle/Header"
+        self.particle_header = WarpXParticleHeader(particle_header_file)
+        base_particle_fn = self.ds.output_dir + '/particle/Level_%d/DATA_%.4d'
+        gid = 0
+        for lev, data in self.particle_header.data_map.items():
+            for pdf in data.values():
+                self.grids[gid]._particle_filename = base_particle_fn % \
+                                                     (lev, pdf.file_number)
+                self.grid_particle_count[gid] += pdf.num_particles
+                self.grids[gid].NumberOfParticles = pdf.num_particles
+                self.grids[gid]._particle_offset = pdf.offset
+                gid += 1
+
+        # add particle fields to field_list
+        pfield_list = self.particle_header.known_fields
+        self.field_list.extend(pfield_list)
+
+    
+def _skip_line(line):
+    if len(line) == 0:
+        return True
+    if line[0] == '\n':
+        return True
+    if line[0] == "=":
+        return True
+    if line[0] == ' ':
+        return True
+
+
+class WarpXDataset(BoxlibDataset):
+
+    _index_class = WarpXHierarchy
+    _field_info_class = WarpXFieldInfo
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "warpx_job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        if os.path.exists(jobinfo_filename):
+            return True
+        return False
+
+    def _parse_parameter_file(self):
+        super(WarpXDataset, self)._parse_parameter_file()
+        jobinfo_filename = os.path.join(self.output_dir, "warpx_job_info")
+        with open(jobinfo_filename, "r") as f:
+            for line in f.readlines():
+                if _skip_line(line):
+                    continue
+                l = line.strip().split(":")
+                if len(l) == 2:
+                    self.parameters[l[0].strip()] = l[1].strip()
+                l = line.strip().split("=")
+                if len(l) == 2:
+                    self.parameters[l[0].strip()] = l[1].strip()
+                
+        # set the periodicity based on the integer BC runtime parameters
+        is_periodic = self.parameters['geometry.is_periodic'].split()
+        periodicity = [bool(val) for val in is_periodic]
+        self.periodicity = ensure_tuple(periodicity)

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 02b4fb8c4804da750fb2693584448b4d0e05451e yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -50,6 +50,9 @@
     tr *= (gamma - 1.0)
     return tr
 
+class WarpXFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+    known_particle_fields = ()
 
 class BoxlibFieldInfo(FieldInfoContainer):
     known_other_fields = (

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 02b4fb8c4804da750fb2693584448b4d0e05451e yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -1,5 +1,5 @@
 """
-Orion data-file handling functions
+Boxlib data-file handling functions
 
 
 


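For orientation: the particle Header file parsed by WarpXParticleHeader above
is plain text. A hypothetical example, following the read order in __init__
(all values made up; the trailing '_'-separated token of the version string
names the real precision):

    Version_One_Dot_Zero_double    <- version string; real type is "double"
    3                              <- dimensionality
    4                              <- number of extra real components
    1024                           <- total number of particles
    1025                           <- max_next_id
    0                              <- finest level (num_levels = finest + 1)
    2                              <- number of grids on level 0
    0 512 0                        <- per grid: file_number num_particles offset
    0 512 8192
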
https://bitbucket.org/yt_analysis/yt/commits/0c5c0708cf0f/
Changeset:   0c5c0708cf0f
Branch:      yt
User:        atmyers
Date:        2017-01-18 19:28:48+00:00
Summary:     first stab at implementing boxlib particle IO.
Affected #:  2 files

diff -r 02b4fb8c4804da750fb2693584448b4d0e05451e -r 0c5c0708cf0f7e2437c5fecd04f3d769069cbb48 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -359,6 +359,7 @@
         self.field_order = [f for f in self.field_list]
 
     def _setup_data_io(self):
+        print(self.dataset_type)
         self.io = io_registry[self.dataset_type](self.dataset)
 
 
@@ -1055,7 +1056,17 @@
     def __init__(self, header_filename):
         with open(header_filename, "r") as f:
             self.version_string = f.readline().strip()
-            self.real_type = self.version_string.split('_')[-1]
+
+            particle_real_type = self.version_string.split('_')[-1]
+            if particle_real_type == 'double':
+                self.real_type = np.float64
+            elif particle_real_type == 'single':
+                self.real_type = np.float32
+            else:
+                raise RuntimeError("yt did not recognize particle real type.")
+            self.int_type = np.int32
+
             self.dim = int(f.readline().strip())
             self.num_int_base = 2 + self.dim
             self.num_real_base = self.dim
@@ -1085,6 +1096,8 @@
         self._generate_particle_fields()
 
     def _generate_particle_fields(self):
+
+        # these will always be there
         self.known_int_fields = [("io", "particle_id"),
                                  ("io", "particle_cpu"),
                                  ("io", "particle_cell_x"),
@@ -1092,16 +1105,19 @@
                                  ("io", "particle_cell_z")]
         self.known_int_fields = self.known_int_fields[0:2+self.dim]
 
+        # these are extra integer fields
         extra_int_fields = ["particle_int_comp%d" % i 
                             for i in range(self.num_int_extra)]
         self.known_int_fields.extend([("io", field) 
                                       for field in extra_int_fields])
 
+        # these real fields will always be there
         self.known_real_fields = [("io", "particle_position_x"),
                                   ("io", "particle_position_y"),
                                   ("io", "particle_position_z")]
         self.known_real_fields = self.known_real_fields[0:self.dim]
 
+        # these are the extras
         extra_real_fields = ["particle_real_comp%d" % i 
                              for i in range(self.num_real_extra)]
         self.known_real_fields.extend([("io", field) 
@@ -1109,17 +1125,23 @@
 
         self.known_fields = self.known_int_fields + self.known_real_fields
 
+        self.particle_int_dtype = np.dtype([(t[1], self.int_type) 
+                                            for t in self.known_int_fields])
+
+        self.particle_real_dtype = np.dtype([(t[1], self.real_type) 
+                                            for t in self.known_real_fields])
+        
 
 class WarpXHierarchy(BoxlibHierarchy):
 
-    def __init__(self, ds, dataset_type='warpx_native'):
-        BoxlibHierarchy.__init__(self, ds, dataset_type)
+    def __init__(self, ds, dataset_type="warpx_native"):
+        super(WarpXHierarchy, self).__init__(ds, dataset_type)
         self._read_particles()
         
     def _read_particles(self):
-        particle_header_file = self.ds.output_dir + "/particle/Header"
+        particle_header_file = self.ds.output_dir + "/particle0/Header"
         self.particle_header = WarpXParticleHeader(particle_header_file)
-        base_particle_fn = self.ds.output_dir + '/particle/Level_%d/DATA_%.4d'
+        base_particle_fn = self.ds.output_dir + '/particle0/Level_%d/DATA_%.4d'
         gid = 0
         for lev, data in self.particle_header.data_map.items():
             for pdf in data.values():
@@ -1168,6 +1190,7 @@
 
     def _parse_parameter_file(self):
         super(WarpXDataset, self)._parse_parameter_file()
+        self.dataset_type = "warpx_native"
         jobinfo_filename = os.path.join(self.output_dir, "warpx_job_info")
         with open(jobinfo_filename, "r") as f:
             for line in f.readlines():

diff -r 02b4fb8c4804da750fb2693584448b4d0e05451e -r 0c5c0708cf0f7e2437c5fecd04f3d769069cbb48 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -156,3 +156,55 @@
                     line = lines[num]
                     particles.append(read(line, field))
             return np.array(particles)
+
+
+class IOHandlerWarpX(IOHandlerBoxlib):
+    _dataset_type = "warpx_native"
+
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        chunks = list(chunks)
+
+        if selector.__class__.__name__ == "GridSelector":
+
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+
+            grid = chunks[0].objs[0]
+
+            for ftype, fname in fields:
+                rv[ftype, fname] = self._read_particles(grid, fname)
+
+            return rv
+
+        rv = {f: np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    data = self._read_particles(grid, fname)
+                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
+        return rv
+
+    def _read_particles(self, grid, name):
+
+        particles = []
+
+        if grid.NumberOfParticles == 0:
+            return np.array(particles)
+
+        pheader = self.ds.index.particle_header
+        
+        int_fnames = [fname for ftype, fname in pheader.known_int_fields]
+        if name in int_fnames:
+            ind = int_fnames.index(name)
+            with open(grid._particle_filename, "rb") as f:
+                idata = np.fromfile(f, pheader.int_type, grid.NumberOfParticles)
+                return np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
+
+        real_fnames = [fname for ftype, fname in pheader.known_real_fields]
+        if name in real_fnames:
+            ind = real_fnames.index(name)
+            with open(grid._particle_filename, "rb") as f:
+                f.seek(pheader.particle_int_dtype.itemsize * grid.NumberOfParticles)
+                rdata = np.fromfile(f, pheader.real_type, grid.NumberOfParticles)
+                return np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)


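The strided slices here (idata[ind::pheader.num_int] and
rdata[ind::pheader.num_real]) de-interleave a single component from
record-ordered data, since each particle's components are stored
back-to-back. A minimal numpy sketch of the idea, with made-up sizes:

    import numpy as np

    num_int = 5                          # components per particle record
    # three particles stored record-by-record: [id, cpu, cx, cy, cz] ...
    idata = np.arange(3 * num_int, dtype=np.int32)
    particle_ids = idata[0::num_int]     # array([ 0,  5, 10], dtype=int32)
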
https://bitbucket.org/yt_analysis/yt/commits/108333fa42c0/
Changeset:   108333fa42c0
Branch:      yt
User:        atmyers
Date:        2017-01-18 19:39:32+00:00
Summary:     Correcting a silly mistake.
Affected #:  2 files

diff -r 0c5c0708cf0f7e2437c5fecd04f3d769069cbb48 -r 108333fa42c01f199c89e930355ad4661f3d7bb2 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -359,7 +359,6 @@
         self.field_order = [f for f in self.field_list]
 
     def _setup_data_io(self):
-        print(self.dataset_type)
         self.io = io_registry[self.dataset_type](self.dataset)
 
 

diff -r 0c5c0708cf0f7e2437c5fecd04f3d769069cbb48 -r 108333fa42c01f199c89e930355ad4661f3d7bb2 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -198,13 +198,17 @@
         if name in int_fnames:
             ind = int_fnames.index(name)
             with open(grid._particle_filename, "rb") as f:
-                idata = np.fromfile(f, pheader.int_type, grid.NumberOfParticles)
+                f.seek(grid._particle_offset)
+                idata = np.fromfile(f, pheader.int_type, 
+                                    pheader.num_int * grid.NumberOfParticles)
                 return np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
 
         real_fnames = [fname for ftype, fname in pheader.known_real_fields]
         if name in real_fnames:
             ind = real_fnames.index(name)
             with open(grid._particle_filename, "rb") as f:
+                f.seek(grid._particle_offset)
                 f.seek(pheader.particle_int_dtype.itemsize * grid.NumberOfParticles)
-                rdata = np.fromfile(f, pheader.real_type, grid.NumberOfParticles)
+                rdata = np.fromfile(f, pheader.real_type, 
+                                    pheader.num_real * grid.NumberOfParticles)
                 return np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)


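Note that the real-field branch now issues two consecutive absolute seeks, so
the second discards the first; the two offsets need to be summed instead,
which is what the next changeset does. A two-line illustration of Python's
default seek semantics (file name hypothetical):

    with open("DATA_0000", "rb") as f:   # hypothetical particle data file
        f.seek(128)
        f.seek(64)    # position is now 64, not 192; use f.seek(128 + 64)
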
https://bitbucket.org/yt_analysis/yt/commits/86326929cb18/
Changeset:   86326929cb18
Branch:      yt
User:        atmyers
Date:        2017-01-18 19:50:19+00:00
Summary:     Telling yt about the units of the particle position fields.
Affected #:  2 files

diff -r 108333fa42c01f199c89e930355ad4661f3d7bb2 -r 86326929cb18099f5e24b18db674c98d5098eeec yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -52,7 +52,11 @@
 
 class WarpXFieldInfo(FieldInfoContainer):
     known_other_fields = ()
-    known_particle_fields = ()
+    known_particle_fields = (
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+    )
 
 class BoxlibFieldInfo(FieldInfoContainer):
     known_other_fields = (

diff -r 108333fa42c01f199c89e930355ad4661f3d7bb2 -r 86326929cb18099f5e24b18db674c98d5098eeec yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -207,8 +207,8 @@
         if name in real_fnames:
             ind = real_fnames.index(name)
             with open(grid._particle_filename, "rb") as f:
-                f.seek(grid._particle_offset)
-                f.seek(pheader.particle_int_dtype.itemsize * grid.NumberOfParticles)
-                rdata = np.fromfile(f, pheader.real_type, 
+                f.seek(grid._particle_offset + 
+                       pheader.particle_int_dtype.itemsize * grid.NumberOfParticles)
+                rdata = np.fromfile(f, pheader.real_type,
                                     pheader.num_real * grid.NumberOfParticles)
                 return np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)


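The combined seek works because each grid's data is laid out as one block of
all integer components followed by one block of all real components, and
particle_int_dtype.itemsize already covers every integer component of a
single record. The arithmetic, with made-up numbers:

    offset = 4096                 # grid._particle_offset
    npart = 100                   # grid.NumberOfParticles
    int_itemsize = 5 * 4          # five int32 components per record
    real_block_start = offset + int_itemsize * npart   # = 6096
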
https://bitbucket.org/yt_analysis/yt/commits/888f37e760c1/
Changeset:   888f37e760c1
Branch:      yt
User:        atmyers
Date:        2017-01-19 00:58:05+00:00
Summary:     Also hooking up Nyx particles while we're at it.
Affected #:  2 files

diff -r 86326929cb18099f5e24b18db674c98d5098eeec -r 888f37e760c1ef36720f7c30e98fe1cf22420eb6 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -361,6 +361,26 @@
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.dataset)
 
+    def _read_particles(self, directory_name, is_checkpoint, extra_field_names=None):
+        particle_header_file = self.ds.output_dir + "/" + directory_name + "/Header"
+        self.particle_header = BoxLibParticleHeader(particle_header_file, 
+                                                    is_checkpoint,
+                                                    extra_field_names)
+        base_particle_fn = self.ds.output_dir + '/' + directory_name + "/Level_%d/DATA_%.4d"
+        gid = 0
+        for lev, data in self.particle_header.data_map.items():
+            for pdf in data.values():
+                self.grids[gid]._particle_filename = base_particle_fn % \
+                                                     (lev, pdf.file_number)
+                self.grid_particle_count[gid] += pdf.num_particles
+                self.grids[gid].NumberOfParticles = pdf.num_particles
+                self.grids[gid]._particle_offset = pdf.offset
+                gid += 1
+
+        # add particle fields to field_list
+        pfield_list = self.particle_header.known_fields
+        self.field_list.extend(pfield_list)
+
 
 class BoxlibDataset(Dataset):
     """
@@ -914,42 +934,8 @@
 
     def __init__(self, ds, dataset_type='nyx_native'):
         super(NyxHierarchy, self).__init__(ds, dataset_type)
-        self._read_particle_header()
-
-    def _read_particle_header(self):
-        if not self.ds.parameters["particles"]:
-            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
-            return
-        for fn in ['particle_position_%s' % ax for ax in 'xyz'] + \
-                  ['particle_mass'] +  \
-                  ['particle_velocity_%s' % ax for ax in 'xyz']:
-            self.field_list.append(("io", fn))
-        header = open(os.path.join(self.ds.output_dir, "DM", "Header"))
-        version = header.readline()  # NOQA
-        ndim = header.readline()  # NOQA
-        nfields = header.readline()  # NOQA
-        ntotalpart = int(header.readline())  # NOQA
-        nextid = header.readline()  # NOQA
-        maxlevel = int(header.readline())  # NOQA
 
-        # Skip over how many grids on each level; this is degenerate
-        for i in range(maxlevel + 1):
-            header.readline()
-
-        grid_info = np.fromiter((int(i) for line in header.readlines()
-                                 for i in line.split()),
-                                dtype='int64',
-                                count=3*self.num_grids).reshape((self.num_grids, 3))
-        # we need grid_info in `populate_grid_objects`, so save it to self
-
-        for g, pg in izip(self.grids, grid_info):
-            g.particle_filename = os.path.join(self.ds.output_dir, "DM",
-                                               "Level_%s" % (g.Level),
-                                               "DATA_%04i" % pg[0])
-            g.NumberOfParticles = pg[1]
-            g._particle_offset = pg[2]
-
-        self.grid_particle_count[:, 0] = grid_info[:, 1]
+        self._read_particles("DM", False)
 
 
 class NyxDataset(BoxlibDataset):
@@ -1050,9 +1036,9 @@
     return vals
 
 
-class WarpXParticleHeader(object):
+class BoxLibParticleHeader(object):
 
-    def __init__(self, header_filename):
+    def __init__(self, header_filename, is_checkpoint, extra_field_names=None):
         with open(header_filename, "r") as f:
             self.version_string = f.readline().strip()
 
@@ -1078,6 +1064,14 @@
             self.finest_level = int(f.readline().strip())
             self.num_levels = self.finest_level + 1
 
+            # Boxlib particles can be written in checkpoint or plotfile mode
+            # The base integer fields are only there for checkpoints, but some
+            # codes use the checkpoint format for plotting
+            if not is_checkpoint:
+                self.num_int_base = 0
+                self.num_int_extra = 0
+                self.num_int = 0
+
             self.grids_per_level = np.zeros(self.num_levels, dtype='int64')
             self.data_map = {}
             for level_num in range(self.num_levels):
@@ -1092,17 +1086,17 @@
                     entry = [int(val) for val in f.readline().strip().split()]
                     self.data_map[level_num][grid_num] = pfd(*entry)
 
-        self._generate_particle_fields()
+        self._generate_particle_fields(extra_field_names)
 
-    def _generate_particle_fields(self):
+    def _generate_particle_fields(self, extra_field_names):
 
-        # these will always be there
+        # these are the 'base' integer fields
         self.known_int_fields = [("io", "particle_id"),
                                  ("io", "particle_cpu"),
                                  ("io", "particle_cell_x"),
                                  ("io", "particle_cell_y"),
                                  ("io", "particle_cell_z")]
-        self.known_int_fields = self.known_int_fields[0:2+self.dim]
+        self.known_int_fields = self.known_int_fields[0:self.num_int_base]
 
         # these are extra integer fields
         extra_int_fields = ["particle_int_comp%d" % i 
@@ -1110,17 +1104,21 @@
         self.known_int_fields.extend([("io", field) 
                                       for field in extra_int_fields])
 
-        # these real fields will always be there
+        # these are the base real fields
         self.known_real_fields = [("io", "particle_position_x"),
                                   ("io", "particle_position_y"),
                                   ("io", "particle_position_z")]
-        self.known_real_fields = self.known_real_fields[0:self.dim]
+        self.known_real_fields = self.known_real_fields[0:self.num_real_base]
 
         # these are the extras
-        extra_real_fields = ["particle_real_comp%d" % i 
-                             for i in range(self.num_real_extra)]
+        if extra_field_names is not None:
+            assert(len(extra_field_names) == self.num_real_extra)
+        else:
+            extra_field_names = ["particle_real_comp%d" % i 
+                                 for i in range(self.num_real_extra)]
+
         self.known_real_fields.extend([("io", field) 
-                                       for field in extra_real_fields])
+                                       for field in extra_field_names])
 
         self.known_fields = self.known_int_fields + self.known_real_fields
 
@@ -1129,32 +1127,14 @@
 
         self.particle_real_dtype = np.dtype([(t[1], self.real_type) 
                                             for t in self.known_real_fields])
-        
+
 
 class WarpXHierarchy(BoxlibHierarchy):
 
-    def __init__(self, ds, dataset_type="warpx_native"):
+    def __init__(self, ds, dataset_type="boxlib_native"):
         super(WarpXHierarchy, self).__init__(ds, dataset_type)
-        self._read_particles()
+        self._read_particles("particle0", True)
         
-    def _read_particles(self):
-        particle_header_file = self.ds.output_dir + "/particle0/Header"
-        self.particle_header = WarpXParticleHeader(particle_header_file)
-        base_particle_fn = self.ds.output_dir + '/particle0/Level_%d/DATA_%.4d'
-        gid = 0
-        for lev, data in self.particle_header.data_map.items():
-            for pdf in data.values():
-                self.grids[gid]._particle_filename = base_particle_fn % \
-                                                     (lev, pdf.file_number)
-                self.grid_particle_count[gid] += pdf.num_particles
-                self.grids[gid].NumberOfParticles = pdf.num_particles
-                self.grids[gid]._particle_offset = pdf.offset
-                gid += 1
-
-        # add particle fields to field_list
-        pfield_list = self.particle_header.known_fields
-        self.field_list.extend(pfield_list)
-
     
 def _skip_line(line):
     if len(line) == 0:
@@ -1189,7 +1169,6 @@
 
     def _parse_parameter_file(self):
         super(WarpXDataset, self)._parse_parameter_file()
-        self.dataset_type = "warpx_native"
         jobinfo_filename = os.path.join(self.output_dir, "warpx_job_info")
         with open(jobinfo_filename, "r") as f:
             for line in f.readlines():

diff -r 86326929cb18099f5e24b18db674c98d5098eeec -r 888f37e760c1ef36720f7c30e98fe1cf22420eb6 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -81,6 +81,59 @@
                         local_offset += size
         return data
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        chunks = list(chunks)
+
+        if selector.__class__.__name__ == "GridSelector":
+
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+
+            grid = chunks[0].objs[0]
+
+            for ftype, fname in fields:
+                rv[ftype, fname] = self._read_particles(grid, fname)
+
+            return rv
+
+        rv = {f: np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    data = self._read_particles(grid, fname)
+                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
+        return rv
+
+    def _read_particles(self, grid, name):
+
+        particles = []
+
+        if grid.NumberOfParticles == 0:
+            return np.array(particles)
+
+        pheader = self.ds.index.particle_header
+        
+        int_fnames = [fname for ftype, fname in pheader.known_int_fields]
+        if name in int_fnames:
+            ind = int_fnames.index(name)
+            with open(grid._particle_filename, "rb") as f:
+                f.seek(grid._particle_offset)
+                idata = np.fromfile(f, pheader.int_type, 
+                                    pheader.num_int * grid.NumberOfParticles)
+                return np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
+
+        real_fnames = [fname for ftype, fname in pheader.known_real_fields]
+        if name in real_fnames:
+            ind = real_fnames.index(name)
+            with open(grid._particle_filename, "rb") as f:
+                f.seek(grid._particle_offset + 
+                       pheader.particle_int_dtype.itemsize * grid.NumberOfParticles)
+                rdata = np.fromfile(f, pheader.real_type,
+                                    pheader.num_real * grid.NumberOfParticles)
+                return np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)
+
+
 class IOHandlerOrion(IOHandlerBoxlib):
     _dataset_type = "orion_native"
 
@@ -156,59 +209,3 @@
                     line = lines[num]
                     particles.append(read(line, field))
             return np.array(particles)
-
-
-class IOHandlerWarpX(IOHandlerBoxlib):
-    _dataset_type = "warpx_native"
-
-    def _read_particle_selection(self, chunks, selector, fields):
-        rv = {}
-        chunks = list(chunks)
-
-        if selector.__class__.__name__ == "GridSelector":
-
-            if not (len(chunks) == len(chunks[0].objs) == 1):
-                raise RuntimeError
-
-            grid = chunks[0].objs[0]
-
-            for ftype, fname in fields:
-                rv[ftype, fname] = self._read_particles(grid, fname)
-
-            return rv
-
-        rv = {f: np.array([]) for f in fields}
-        for chunk in chunks:
-            for grid in chunk.objs:
-                for ftype, fname in fields:
-                    data = self._read_particles(grid, fname)
-                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
-        return rv
-
-    def _read_particles(self, grid, name):
-
-        particles = []
-
-        if grid.NumberOfParticles == 0:
-            return np.array(particles)
-
-        pheader = self.ds.index.particle_header
-        
-        int_fnames = [fname for ftype, fname in pheader.known_int_fields]
-        if name in int_fnames:
-            ind = int_fnames.index(name)
-            with open(grid._particle_filename, "rb") as f:
-                f.seek(grid._particle_offset)
-                idata = np.fromfile(f, pheader.int_type, 
-                                    pheader.num_int * grid.NumberOfParticles)
-                return np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
-
-        real_fnames = [fname for ftype, fname in pheader.known_real_fields]
-        if name in real_fnames:
-            ind = real_fnames.index(name)
-            with open(grid._particle_filename, "rb") as f:
-                f.seek(grid._particle_offset + 
-                       pheader.particle_int_dtype.itemsize * grid.NumberOfParticles)
-                rdata = np.fromfile(f, pheader.real_type,
-                                    pheader.num_real * grid.NumberOfParticles)
-                return np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)


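After this refactor, any Boxlib-based frontend can opt into particle support
through the shared helper; the is_checkpoint flag matters because the base
integer fields (id, cpu, cell indices) are only written in checkpoint-style
output. A schematic sketch (class and directory names are illustrative):

    class MyCodeHierarchy(BoxlibHierarchy):
        def __init__(self, ds, dataset_type="boxlib_native"):
            super(MyCodeHierarchy, self).__init__(ds, dataset_type)
            # particle directory name, checkpoint flag, optional field names
            self._read_particles("DM", is_checkpoint=False)
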
https://bitbucket.org/yt_analysis/yt/commits/1c6cd09df798/
Changeset:   1c6cd09df798
Branch:      yt
User:        atmyers
Date:        2017-01-19 01:11:24+00:00
Summary:     Hard-code the extra field names for Nyx and WarpX.
Affected #:  2 files

diff -r 888f37e760c1ef36720f7c30e98fe1cf22420eb6 -r 1c6cd09df7986da5da1ae6880d1fd1ecdf668ae7 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -40,6 +40,7 @@
 
 from .fields import \
     BoxlibFieldInfo, \
+    NyxFieldInfo, \
     MaestroFieldInfo, \
     CastroFieldInfo, \
     WarpXFieldInfo
@@ -362,11 +363,13 @@
         self.io = io_registry[self.dataset_type](self.dataset)
 
     def _read_particles(self, directory_name, is_checkpoint, extra_field_names=None):
+
         particle_header_file = self.ds.output_dir + "/" + directory_name + "/Header"
         self.particle_header = BoxLibParticleHeader(particle_header_file, 
                                                     is_checkpoint,
                                                     extra_field_names)
         base_particle_fn = self.ds.output_dir + '/' + directory_name + "/Level_%d/DATA_%.4d"
+
         gid = 0
         for lev, data in self.particle_header.data_map.items():
             for pdf in data.values():
@@ -935,12 +938,20 @@
     def __init__(self, ds, dataset_type='nyx_native'):
         super(NyxHierarchy, self).__init__(ds, dataset_type)
 
-        self._read_particles("DM", False)
+        # extra beyond the base real fields that all Boxlib
+        # particles have, i.e. the xyz positions
+        nyx_extra_real_fields = ['particle_mass',
+                                 'particle_velocity_x',
+                                 'particle_velocity_y',
+                                 'particle_velocity_z']
+
+        self._read_particles("DM", False, nyx_extra_real_fields)
 
 
 class NyxDataset(BoxlibDataset):
 
     _index_class = NyxHierarchy
+    _field_info_class = NyxFieldInfo
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
@@ -1133,6 +1144,14 @@
 
     def __init__(self, ds, dataset_type="boxlib_native"):
         super(WarpXHierarchy, self).__init__(ds, dataset_type)
+
+        # extra beyond the base real fields that all Boxlib
+        # particles have, i.e. the xyz positions
+        warpx_extra_real_fields = ['particle_weight',
+                                   'particle_velocity_x',
+                                   'particle_velocity_y',
+                                   'particle_velocity_z']
+
         self._read_particles("particle0", True)
         
     

diff -r 888f37e760c1ef36720f7c30e98fe1cf22420eb6 -r 1c6cd09df7986da5da1ae6880d1fd1ecdf668ae7 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -50,6 +50,7 @@
     tr *= (gamma - 1.0)
     return tr
 
+
 class WarpXFieldInfo(FieldInfoContainer):
     known_other_fields = ()
     known_particle_fields = (
@@ -58,6 +59,15 @@
         ("particle_position_z", ("code_length", [], None)),
     )
 
+
+class NyxFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+    known_particle_fields = (
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+    )
+
 class BoxlibFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", (rho_units, ["density"], None)),


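The hard-coded names replace the generic particle_real_comp%d defaults in
order, and the assert in _generate_particle_fields checks that their count
matches num_real_extra from the Header. The correspondence, spelled out:

    defaults = ["particle_real_comp%d" % i for i in range(4)]
    named = ['particle_mass', 'particle_velocity_x',
             'particle_velocity_y', 'particle_velocity_z']
    mapping = dict(zip(defaults, named))
    # {'particle_real_comp0': 'particle_mass', ...}
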
https://bitbucket.org/yt_analysis/yt/commits/061597f57954/
Changeset:   061597f57954
Branch:      yt
User:        atmyers
Date:        2017-01-19 01:20:01+00:00
Summary:     A little bit of prettifying
Affected #:  1 file

diff -r 1c6cd09df7986da5da1ae6880d1fd1ecdf668ae7 -r 061597f57954f8c9d16f6ebd835690181f2aa19e yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -945,7 +945,9 @@
                                  'particle_velocity_y',
                                  'particle_velocity_z']
 
-        self._read_particles("DM", False, nyx_extra_real_fields)
+        is_checkpoint = False
+
+        self._read_particles("DM", True, nyx_extra_real_fields)
 
 
 class NyxDataset(BoxlibDataset):
@@ -1152,7 +1154,9 @@
                                    'particle_velocity_y',
                                    'particle_velocity_z']
 
-        self._read_particles("particle0", True)
+        is_checkpoint = True
+
+        self._read_particles("particle0", is_checkpoint, warpx_extra_real_fields)
         
     
 def _skip_line(line):


https://bitbucket.org/yt_analysis/yt/commits/7fa941fae1eb/
Changeset:   7fa941fae1eb
Branch:      yt
User:        atmyers
Date:        2017-01-19 02:01:41+00:00
Summary:     messed up is_checkpoint for Nyx somehow.
Affected #:  1 file

diff -r 061597f57954f8c9d16f6ebd835690181f2aa19e -r 7fa941fae1eb628679d164c7d7ed3d00393aa8e9 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -947,7 +947,7 @@
 
         is_checkpoint = False
 
-        self._read_particles("DM", True, nyx_extra_real_fields)
+        self._read_particles("DM", is_checkpoint, nyx_extra_real_fields)
 
 
 class NyxDataset(BoxlibDataset):


https://bitbucket.org/yt_analysis/yt/commits/3f12404ad75f/
Changeset:   3f12404ad75f
Branch:      yt
User:        atmyers
Date:        2017-01-19 02:40:02+00:00
Summary:     remove unused import.
Affected #:  1 file

diff -r 7fa941fae1eb628679d164c7d7ed3d00393aa8e9 -r 3f12404ad75fc2391b3876777d55431dec00715e yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -27,7 +27,6 @@
     mylog, \
     setdefaultattr
 from yt.data_objects.grid_patch import AMRGridPatch
-from yt.extern.six.moves import zip as izip
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
 


https://bitbucket.org/yt_analysis/yt/commits/0e70c4d1e3ba/
Changeset:   0e70c4d1e3ba
Branch:      yt
User:        atmyers
Date:        2017-01-20 22:13:56+00:00
Summary:     Parsing the additional WarpX species information.
Affected #:  2 files

diff -r 7fa941fae1eb628679d164c7d7ed3d00393aa8e9 -r 0e70c4d1e3ba67389b62e041b9ba2b2019a2307d yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -30,6 +30,7 @@
 from yt.extern.six.moves import zip as izip
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
+from yt.units import YTQuantity
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
@@ -1158,6 +1159,27 @@
 
         self._read_particles("particle0", is_checkpoint, warpx_extra_real_fields)
         
+        # Additional WarpX particle information (used to set up species)
+        with open(self.ds.output_dir + "/WarpXHeader", 'r') as f:
+
+            # skip to the end, where species info is written out
+            line = f.readline()
+            while line and line != ')\n':
+                line = f.readline()
+            line = f.readline()
+
+            # Read in the species information
+            species_id = 0
+            while line:
+                line = line.strip().split()
+                charge = YTQuantity(float(line[0]), "C")
+                mass = YTQuantity(float(line[1]), "kg")
+                charge_name = 'particle%.1d_charge' % species_id
+                mass_name = 'particle%.1d_mass' % species_id
+                self.parameters[charge_name] = charge
+                self.parameters[mass_name] = mass
+                line = f.readline()
+                species_id += 1
     
 def _skip_line(line):
     if len(line) == 0:

diff -r 7fa941fae1eb628679d164c7d7ed3d00393aa8e9 -r 0e70c4d1e3ba67389b62e041b9ba2b2019a2307d yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -15,7 +15,7 @@
 import re
 
 from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, amu_cgs
+    boltzmann_constant_cgs, amu_cgs, me
 from yt.fields.field_info_container import \
     FieldInfoContainer
 
@@ -59,6 +59,26 @@
         ("particle_position_z", ("code_length", [], None)),
     )
 
+    def setup_particle_fields(self, ptype):
+
+        def get_mass(field, data):
+            species_mass = data.ds.index.parameters['particle0_mass']
+            return data["particle_weight"]*species_mass
+
+        self.add_field((ptype, "particle_mass"), sampling_type="particle",
+                       function=get_mass,
+                       units="kg")
+
+        def get_charge(field, data):
+            species_charge = data.ds.index.parameters['particle0_charge']
+            return data["particle_weight"]*species_charge
+
+        self.add_field((ptype, "particle_charge"), sampling_type="particle",
+                       function=get_charge,
+                       units="C")
+
+        super(WarpXFieldInfo, self).setup_particle_fields(ptype)
+
 
 class NyxFieldInfo(FieldInfoContainer):
     known_other_fields = ()


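The derived fields defined here scale the per-macroparticle weight by the
physical charge and mass read from WarpXHeader, giving totals in SI units.
Once a dataset loads, they behave like any other particle field; a usage
sketch (file name hypothetical, and at this point in the series the particle
type is still "io"):

    import yt

    ds = yt.load("plt00000")      # hypothetical WarpX plotfile
    ad = ds.all_data()
    total_charge = ad["io", "particle_charge"].sum()   # YTArray in Coulombs
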
https://bitbucket.org/yt_analysis/yt/commits/b20bfd40fcf0/
Changeset:   b20bfd40fcf0
Branch:      yt
User:        atmyers
Date:        2017-01-20 22:31:27+00:00
Summary:     WarpX outputs no longer write the particles in checkpoint mode.
Affected #:  1 file

diff -r 0e70c4d1e3ba67389b62e041b9ba2b2019a2307d -r b20bfd40fcf0af2033a39829c236650384c10b9c yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1155,7 +1155,7 @@
                                    'particle_velocity_y',
                                    'particle_velocity_z']
 
-        is_checkpoint = True
+        is_checkpoint = False
 
         self._read_particles("particle0", is_checkpoint, warpx_extra_real_fields)
         


https://bitbucket.org/yt_analysis/yt/commits/7f00ed848898/
Changeset:   7f00ed848898
Branch:      yt
User:        atmyers
Date:        2017-01-20 22:33:25+00:00
Summary:     merging.
Affected #:  1 file

diff -r b20bfd40fcf0af2033a39829c236650384c10b9c -r 7f00ed848898de1131270bfb4f94594b0181c539 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -27,7 +27,6 @@
     mylog, \
     setdefaultattr
 from yt.data_objects.grid_patch import AMRGridPatch
-from yt.extern.six.moves import zip as izip
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
 from yt.units import YTQuantity


https://bitbucket.org/yt_analysis/yt/commits/34d6b94daa24/
Changeset:   34d6b94daa24
Branch:      yt
User:        atmyers
Date:        2017-01-20 23:30:46+00:00
Summary:     Using the correct unit system in the WarpX frontend.
Affected #:  2 files

diff -r 7f00ed848898de1131270bfb4f94594b0181c539 -r 34d6b94daa242a0a43b04309ba19adf94acdfe6d yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1196,6 +1196,22 @@
     _index_class = WarpXHierarchy
     _field_info_class = WarpXFieldInfo
 
+    def __init__(self, output_dir,
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
+                 dataset_type='boxlib_native',
+                 storage_filename=None,
+                 units_override=None,
+                 unit_system="mks"):
+
+        super(WarpXDataset, self).__init__(output_dir,
+                                           cparam_filename,
+                                           fparam_filename,
+                                           dataset_type,
+                                           storage_filename,
+                                           units_override,
+                                           unit_system)
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         # fill our args
@@ -1229,3 +1245,9 @@
         is_periodic = self.parameters['geometry.is_periodic'].split()
         periodicity = [bool(val) for val in is_periodic]
         self.periodicity = ensure_tuple(periodicity)
+
+    def _set_code_unit_attributes(self):
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "m"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "kg"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "m/s"))

diff -r 7f00ed848898de1131270bfb4f94594b0181c539 -r 34d6b94daa242a0a43b04309ba19adf94acdfe6d yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -52,11 +52,25 @@
 
 
 class WarpXFieldInfo(FieldInfoContainer):
-    known_other_fields = ()
+    known_other_fields = (
+        ("Bx", ("T", ["magnetic_field_x"], None)),
+        ("By", ("T", ["magnetic_field_y"], None)),
+        ("Bz", ("T", ["magnetic_field_z"], None)),
+        ("Ex", ("V/m", ["electric_field_x"], None)),
+        ("Ey", ("V/m", ["electric_field_y"], None)),
+        ("Ez", ("V/m", ["electric_field_z"], None)),
+        ("jx", ("A", ["current_x"], None)),
+        ("jy", ("A", ["current_y"], None)),
+        ("jz", ("A", ["current_z"], None)),
+    )
     known_particle_fields = (
-        ("particle_position_x", ("code_length", [], None)),
-        ("particle_position_y", ("code_length", [], None)),
-        ("particle_position_z", ("code_length", [], None)),
+        ("particle_weight", ("", [], None)),
+        ("particle_position_x", ("m", [], None)),
+        ("particle_position_y", ("m", [], None)),
+        ("particle_position_z", ("m", [], None)),
+        ("particle_velocity_x", ("m/s", [], None)),
+        ("particle_velocity_y", ("m/s", [], None)),
+        ("particle_velocity_z", ("m/s", [], None)),
     )
 
     def setup_particle_fields(self, ptype):


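Because WarpX writes SI data, the code units are pinned to MKS here rather
than converted, so quantities built from the dataset report SI units
directly. A quick check (plotfile name hypothetical):

    import yt

    ds = yt.load("plt00000")                       # hypothetical plotfile
    print(ds.quan(1.0, "code_length").to("m"))     # 1.0 m
    print(ds.all_data()["boxlib", "Bx"].units)     # T
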
https://bitbucket.org/yt_analysis/yt/commits/b12fd002f9b5/
Changeset:   b12fd002f9b5
Branch:      yt
User:        atmyers
Date:        2017-01-20 23:31:45+00:00
Summary:     removing unused import
Affected #:  1 file

diff -r 34d6b94daa242a0a43b04309ba19adf94acdfe6d -r b12fd002f9b5a7c9ecb011704f6d314d3ed50ee4 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -15,7 +15,7 @@
 import re
 
 from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, amu_cgs, me
+    boltzmann_constant_cgs, amu_cgs
 from yt.fields.field_info_container import \
     FieldInfoContainer
 


https://bitbucket.org/yt_analysis/yt/commits/405b61d39273/
Changeset:   405b61d39273
Branch:      yt
User:        atmyers
Date:        2017-01-21 06:09:49+00:00
Summary:     enabling multi-species support for WarpX particle data.
Affected #:  3 files

diff -r b12fd002f9b5a7c9ecb011704f6d314d3ed50ee4 -r 405b61d39273db6932b6cbd87ebfdd46eee9acea yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -21,6 +21,7 @@
 from stat import ST_CTIME
 
 import numpy as np
+import glob
 
 from yt.funcs import \
     ensure_tuple, \
@@ -76,6 +77,7 @@
         self._base_offset = offset
         self._parent_id = []
         self._children_ids = []
+        self._pdata = {}
 
     def _prepare_grid(self):
         super(BoxlibGrid, self)._prepare_grid()
@@ -144,6 +146,7 @@
         self.dataset_type = dataset_type
         self.header_filename = os.path.join(ds.output_dir, 'Header')
         self.directory = ds.output_dir
+        self.particle_headers = {}
 
         GridIndex.__init__(self, ds, dataset_type)
         self._cache_endianness(self.grids[-1])
@@ -364,24 +367,28 @@
 
     def _read_particles(self, directory_name, is_checkpoint, extra_field_names=None):
 
-        particle_header_file = self.ds.output_dir + "/" + directory_name + "/Header"
-        self.particle_header = BoxLibParticleHeader(particle_header_file, 
-                                                    is_checkpoint,
-                                                    extra_field_names)
+        self.particle_headers[directory_name] = BoxLibParticleHeader(self.ds,
+                                                                     directory_name,
+                                                                     is_checkpoint,
+                                                                     extra_field_names)
+
         base_particle_fn = self.ds.output_dir + '/' + directory_name + "/Level_%d/DATA_%.4d"
 
         gid = 0
-        for lev, data in self.particle_header.data_map.items():
+        for lev, data in self.particle_headers[directory_name].data_map.items():
             for pdf in data.values():
-                self.grids[gid]._particle_filename = base_particle_fn % \
-                                                     (lev, pdf.file_number)
+                pdict = self.grids[gid]._pdata
+                pdict[directory_name] = {}
+                pdict[directory_name]["particle_filename"] = base_particle_fn % \
+                                                             (lev, pdf.file_number)
+                pdict[directory_name]["offset"] = pdf.offset
+                pdict[directory_name]["NumberOfParticles"] = pdf.num_particles
                 self.grid_particle_count[gid] += pdf.num_particles
-                self.grids[gid].NumberOfParticles = pdf.num_particles
-                self.grids[gid]._particle_offset = pdf.offset
+                self.grids[gid].NumberOfParticles += pdf.num_particles
                 gid += 1
 
         # add particle fields to field_list
-        pfield_list = self.particle_header.known_fields
+        pfield_list = self.particle_headers[directory_name].known_fields
         self.field_list.extend(pfield_list)
 
 
@@ -1051,7 +1058,10 @@
 
 class BoxLibParticleHeader(object):
 
-    def __init__(self, header_filename, is_checkpoint, extra_field_names=None):
+    def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None):
+
+        self.species_name = directory_name
+        header_filename =  ds.output_dir + "/" + directory_name + "/Header"
         with open(header_filename, "r") as f:
             self.version_string = f.readline().strip()
 
@@ -1104,23 +1114,23 @@
     def _generate_particle_fields(self, extra_field_names):
 
         # these are the 'base' integer fields
-        self.known_int_fields = [("io", "particle_id"),
-                                 ("io", "particle_cpu"),
-                                 ("io", "particle_cell_x"),
-                                 ("io", "particle_cell_y"),
-                                 ("io", "particle_cell_z")]
+        self.known_int_fields = [(self.species_name, "particle_id"),
+                                 (self.species_name, "particle_cpu"),
+                                 (self.species_name, "particle_cell_x"),
+                                 (self.species_name, "particle_cell_y"),
+                                 (self.species_name, "particle_cell_z")]
         self.known_int_fields = self.known_int_fields[0:self.num_int_base]
 
         # these are extra integer fields
         extra_int_fields = ["particle_int_comp%d" % i 
                             for i in range(self.num_int_extra)]
-        self.known_int_fields.extend([("io", field) 
+        self.known_int_fields.extend([(self.species_name, field) 
                                       for field in extra_int_fields])
 
         # these are the base real fields
-        self.known_real_fields = [("io", "particle_position_x"),
-                                  ("io", "particle_position_y"),
-                                  ("io", "particle_position_z")]
+        self.known_real_fields = [(self.species_name, "particle_position_x"),
+                                  (self.species_name, "particle_position_y"),
+                                  (self.species_name, "particle_position_z")]
         self.known_real_fields = self.known_real_fields[0:self.num_real_base]
 
         # these are the extras
@@ -1130,7 +1140,7 @@
             extra_field_names = ["particle_real_comp%d" % i 
                                  for i in range(self.num_real_extra)]
 
-        self.known_real_fields.extend([("io", field) 
+        self.known_real_fields.extend([(self.species_name, field) 
                                        for field in extra_field_names])
 
         self.known_fields = self.known_int_fields + self.known_real_fields
@@ -1156,7 +1166,8 @@
 
         is_checkpoint = False
 
-        self._read_particles("particle0", is_checkpoint, warpx_extra_real_fields)
+        for ptype in self.ds.particle_types:
+            self._read_particles(ptype, is_checkpoint, warpx_extra_real_fields)
         
         # Additional WarpX particle information (used to set up species)
         with open(self.ds.output_dir + "/WarpXHeader", 'r') as f:
@@ -1246,6 +1257,14 @@
         periodicity = [bool(val) for val in is_periodic]
         self.periodicity = ensure_tuple(periodicity)
 
+        species_list = []
+        species_dirs = glob.glob(self.output_dir + "/particle*")
+        for species in species_dirs:
+            species_list.append(species[len(self.output_dir)+1:])
+        self.particle_types = tuple(species_list)
+        self.particle_types_raw = self.particle_types
+
+
     def _set_code_unit_attributes(self):
         setdefaultattr(self, 'length_unit', self.quan(1.0, "m"))
         setdefaultattr(self, 'mass_unit', self.quan(1.0, "kg"))

diff -r b12fd002f9b5a7c9ecb011704f6d314d3ed50ee4 -r 405b61d39273db6932b6cbd87ebfdd46eee9acea yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -52,6 +52,7 @@
 
 
 class WarpXFieldInfo(FieldInfoContainer):
+
     known_other_fields = (
         ("Bx", ("T", ["magnetic_field_x"], None)),
         ("By", ("T", ["magnetic_field_y"], None)),
@@ -63,6 +64,7 @@
         ("jy", ("A", ["current_y"], None)),
         ("jz", ("A", ["current_z"], None)),
     )
+
     known_particle_fields = (
         ("particle_weight", ("", [], None)),
         ("particle_position_x", ("m", [], None)),
@@ -73,10 +75,16 @@
         ("particle_velocity_z", ("m/s", [], None)),
     )
 
+    extra_union_fields = (
+        ("kg", "particle_mass"),
+        ("C", "particle_charge"),
+        ("", "particle_ones"),
+    )
+
     def setup_particle_fields(self, ptype):
 
         def get_mass(field, data):
-            species_mass = data.ds.index.parameters['particle0_mass']
+            species_mass = data.ds.index.parameters[ptype + '_mass']
             return data["particle_weight"]*species_mass
 
         self.add_field((ptype, "particle_mass"), sampling_type="particle",
@@ -84,7 +92,7 @@
                        units="kg")
 
         def get_charge(field, data):
-            species_charge = data.ds.index.parameters['particle0_charge']
+            species_charge = data.ds.index.parameters[ptype + '_charge']
             return data["particle_weight"]*species_charge
 
         self.add_field((ptype, "particle_charge"), sampling_type="particle",
@@ -102,6 +110,7 @@
         ("particle_position_z", ("code_length", [], None)),
     )
 
+
 class BoxlibFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", (rho_units, ["density"], None)),

diff -r b12fd002f9b5a7c9ecb011704f6d314d3ed50ee4 -r 405b61d39273db6932b6cbd87ebfdd46eee9acea yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -84,6 +84,7 @@
     def _read_particle_selection(self, chunks, selector, fields):
         rv = {}
         chunks = list(chunks)
+        unions = self.ds.particle_unions
 
         if selector.__class__.__name__ == "GridSelector":
 
@@ -93,44 +94,59 @@
             grid = chunks[0].objs[0]
 
             for ftype, fname in fields:
-                rv[ftype, fname] = self._read_particles(grid, fname)
-
+                if ftype in unions:
+                    udata = np.array([])
+                    for subtype in unions[ftype]:
+                        data = self._read_particles(grid, subtype, fname)
+                        udata = np.concatenate((udata, data))
+                    rv[ftype, fname] = udata
+                else:
+                    rv[ftype, fname] = self._read_particles(grid, ftype, fname)
             return rv
 
         rv = {f: np.array([]) for f in fields}
         for chunk in chunks:
             for grid in chunk.objs:
                 for ftype, fname in fields:
-                    data = self._read_particles(grid, fname)
-                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
+                    if ftype in unions:
+                        for subtype in unions[ftype]:
+                            data = self._read_particles(grid, subtype, fname)
+                            rv[ftype, fname] = np.concatenate((data, 
+                                                               rv[ftype, fname]))
+                    else:
+                        data = self._read_particles(grid, ftype, fname)
+                        rv[ftype, fname] = np.concatenate((data, 
+                                                           rv[ftype, fname]))
         return rv
 
-    def _read_particles(self, grid, name):
+    def _read_particles(self, grid, ftype, name):
 
         particles = []
+        npart = grid._pdata[ftype]["NumberOfParticles"]
 
-        if grid.NumberOfParticles == 0:
+        if npart == 0:
             return np.array(particles)
 
-        pheader = self.ds.index.particle_header
+        fn = grid._pdata[ftype]["particle_filename"]
+        offset = grid._pdata[ftype]["offset"]
+        pheader = self.ds.index.particle_headers[ftype]
         
         int_fnames = [fname for ftype, fname in pheader.known_int_fields]
         if name in int_fnames:
             ind = int_fnames.index(name)
-            with open(grid._particle_filename, "rb") as f:
-                f.seek(grid._particle_offset)
-                idata = np.fromfile(f, pheader.int_type, 
-                                    pheader.num_int * grid.NumberOfParticles)
+            fn = grid._pdata[ftype]["particle_filename"]
+            with open(fn, "rb") as f:
+                f.seek(offset)
+                idata = np.fromfile(f, pheader.int_type, pheader.num_int * npart)
                 return np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
 
         real_fnames = [fname for ftype, fname in pheader.known_real_fields]
         if name in real_fnames:
             ind = real_fnames.index(name)
-            with open(grid._particle_filename, "rb") as f:
-                f.seek(grid._particle_offset + 
-                       pheader.particle_int_dtype.itemsize * grid.NumberOfParticles)
-                rdata = np.fromfile(f, pheader.real_type,
-                                    pheader.num_real * grid.NumberOfParticles)
+            with open(fn, "rb") as f:
+                f.seek(offset + 
+                       pheader.particle_int_dtype.itemsize * npart)
+                rdata = np.fromfile(f, pheader.real_type, pheader.num_real * npart)
                 return np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)
 
 

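The union handling above is a fan-out-and-concatenate pattern: a request for a
union type such as "all" is answered by reading each member species and joining
the results. A small self-contained sketch of that dispatch, where read_one is
only a stand-in for the real per-species _read_particles call:

    import numpy as np

    unions = {"all": ["particle0", "particle1"]}  # hypothetical union table

    def read_one(species, fname):
        # stand-in: the real reader pulls this from the particle data files
        return np.arange(3, dtype=np.float64)

    def read_field(ftype, fname):
        if ftype in unions:
            return np.concatenate([read_one(s, fname) for s in unions[ftype]])
        return read_one(ftype, fname)

    assert read_field("all", "particle_weight").size == 6  # 3 + 3
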

https://bitbucket.org/yt_analysis/yt/commits/3ad6f4fb1bc5/
Changeset:   3ad6f4fb1bc5
Branch:      yt
User:        atmyers
Date:        2017-01-21 06:52:22+00:00
Summary:     The Nyx particles are labelled DM.
Affected #:  1 file

diff -r 405b61d39273db6932b6cbd87ebfdd46eee9acea -r 3ad6f4fb1bc54af23cfa05e58b05a1dd3d7df2b1 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1023,7 +1023,7 @@
         if os.path.isdir(os.path.join(self.output_dir, "DM")):
             # we have particles
             self.parameters["particles"] = 1 
-            self.particle_types = ("io",)
+            self.particle_types = ("DM",)
             self.particle_types_raw = self.particle_types
 
     def _set_code_unit_attributes(self):

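With that rename, Nyx particle fields are addressed under the "DM" type rather
than the generic "io" type. A quick usage sketch (the plotfile path is
hypothetical and assumes a Nyx dataset on disk):

    import yt

    ds = yt.load("Nyx_LyA/plt00000")
    ad = ds.all_data()
    print(ad["DM", "particle_position_x"].size)
    print(ad["all", "particle_position_x"].size)  # "all" unions over species
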

https://bitbucket.org/yt_analysis/yt/commits/c21ac164de82/
Changeset:   c21ac164de82
Branch:      yt
User:        atmyers
Date:        2017-01-21 07:07:40+00:00
Summary:     Fixing a flake8 style error.
Affected #:  1 file

diff -r 3ad6f4fb1bc54af23cfa05e58b05a1dd3d7df2b1 -r c21ac164de82215f3a0f2c29104de6772ea82478 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -131,7 +131,7 @@
         offset = grid._pdata[ftype]["offset"]
         pheader = self.ds.index.particle_headers[ftype]
         
-        int_fnames = [fname for ftype, fname in pheader.known_int_fields]
+        int_fnames = [fname for _, fname in pheader.known_int_fields]
         if name in int_fnames:
             ind = int_fnames.index(name)
             fn = grid._pdata[ftype]["particle_filename"]
@@ -140,7 +140,7 @@
                 idata = np.fromfile(f, pheader.int_type, pheader.num_int * npart)
                 return np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
 
-        real_fnames = [fname for ftype, fname in pheader.known_real_fields]
+        real_fnames = [fname for _, fname in pheader.known_real_fields]
         if name in real_fnames:
             ind = real_fnames.index(name)
             with open(fn, "rb") as f:


https://bitbucket.org/yt_analysis/yt/commits/87fdcde367c2/
Changeset:   87fdcde367c2
Branch:      yt
User:        atmyers
Date:        2017-01-21 18:04:59+00:00
Summary:     Update the code support doc.
Affected #:  1 file

diff -r c21ac164de82215f3a0f2c29104de6772ea82478 -r 87fdcde367c2821282979d9a7841787641d27897 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -46,7 +46,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | MOAB                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| Nyx                   |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
+| Nyx                   |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | openPMD               |     Y      |     Y     |      N     |   Y   |    Y     |    Y     |     N      | Partial  |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
@@ -62,6 +62,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Tipsy                 |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| WarpX                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 
 .. [#f1] the one-dimensional base state is not currently read in.
 .. [#f2] These handle mesh fields using an in-memory octree that has not been parallelized.


https://bitbucket.org/yt_analysis/yt/commits/1123aaf88f78/
Changeset:   1123aaf88f78
Branch:      yt
User:        atmyers
Date:        2017-01-21 22:04:53+00:00
Summary:     Fix particle selection and remove some special casing.
Affected #:  1 file

diff -r 87fdcde367c2821282979d9a7841787641d27897 -r 1123aaf88f78abf0a3f589547fe682b46bfde190 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -86,68 +86,78 @@
         chunks = list(chunks)
         unions = self.ds.particle_unions
 
-        if selector.__class__.__name__ == "GridSelector":
-
-            if not (len(chunks) == len(chunks[0].objs) == 1):
-                raise RuntimeError
-
-            grid = chunks[0].objs[0]
-
-            for ftype, fname in fields:
-                if ftype in unions:
-                    udata = np.array([])
-                    for subtype in unions[ftype]:
-                        data = self._read_particles(grid, subtype, fname)
-                        udata = np.concatenate((udata, data))
-                    rv[ftype, fname] = udata
-                else:
-                    rv[ftype, fname] = self._read_particles(grid, ftype, fname)
-            return rv
-
         rv = {f: np.array([]) for f in fields}
         for chunk in chunks:
             for grid in chunk.objs:
                 for ftype, fname in fields:
                     if ftype in unions:
                         for subtype in unions[ftype]:
-                            data = self._read_particles(grid, subtype, fname)
-                            rv[ftype, fname] = np.concatenate((data, 
+                            data = self._read_particles(grid, selector,
+                                                        subtype, fname)
+                            rv[ftype, fname] = np.concatenate((data,
                                                                rv[ftype, fname]))
                     else:
-                        data = self._read_particles(grid, ftype, fname)
-                        rv[ftype, fname] = np.concatenate((data, 
+                        data = self._read_particles(grid, selector,
+                                                    ftype, fname)
+                        rv[ftype, fname] = np.concatenate((data,
                                                            rv[ftype, fname]))
         return rv
 
-    def _read_particles(self, grid, ftype, name):
+    def _read_particles(self, grid, selector, ftype, name):
 
-        particles = []
         npart = grid._pdata[ftype]["NumberOfParticles"]
-
         if npart == 0:
-            return np.array(particles)
+            return np.array([])
 
         fn = grid._pdata[ftype]["particle_filename"]
         offset = grid._pdata[ftype]["offset"]
         pheader = self.ds.index.particle_headers[ftype]
         
+        # handle the case that this is an integer field
         int_fnames = [fname for _, fname in pheader.known_int_fields]
         if name in int_fnames:
             ind = int_fnames.index(name)
             fn = grid._pdata[ftype]["particle_filename"]
             with open(fn, "rb") as f:
+
+                # read in the position fields for selection
+                f.seek(offset + 
+                       pheader.particle_int_dtype.itemsize * npart)
+                rdata = np.fromfile(f, pheader.real_type, pheader.num_real * npart)
+                x = np.asarray(rdata[0::pheader.num_real], dtype=np.float64)
+                y = np.asarray(rdata[1::pheader.num_real], dtype=np.float64)
+                z = np.asarray(rdata[2::pheader.num_real], dtype=np.float64)
+                mask = selector.select_points(x, y, z, 0.0)
+
+                if mask is None:
+                    return np.array([])
+                
+                # read in the data we want
                 f.seek(offset)
                 idata = np.fromfile(f, pheader.int_type, pheader.num_int * npart)
-                return np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
+                data = np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
+                return data[mask].flatten()
 
+        # handle case that this is a real field
         real_fnames = [fname for _, fname in pheader.known_real_fields]
         if name in real_fnames:
             ind = real_fnames.index(name)
             with open(fn, "rb") as f:
+
+                # read in the position fields for selection
                 f.seek(offset + 
                        pheader.particle_int_dtype.itemsize * npart)
                 rdata = np.fromfile(f, pheader.real_type, pheader.num_real * npart)
-                return np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)
+                x = np.asarray(rdata[0::pheader.num_real], dtype=np.float64)
+                y = np.asarray(rdata[1::pheader.num_real], dtype=np.float64)
+                z = np.asarray(rdata[2::pheader.num_real], dtype=np.float64)
+                mask = selector.select_points(x, y, z, 0.0)
+
+                if mask is None:
+                    return np.array([])
+
+                data = np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)
+                return data[mask].flatten()
 
 
 class IOHandlerOrion(IOHandlerBoxlib):

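The selection added above leans on the on-disk layout: each particle's reals
are interleaved (x, y, z, then any extra reals), so positions are recovered by
striding the flat buffer, the selector turns them into a mask, and the
requested field is masked before being returned. A self-contained sketch of
that striding, with a simple box test standing in for selector.select_points
(num_real and the data are invented):

    import numpy as np

    num_real = 4                               # x, y, z plus one extra real
    npart = 10
    rdata = np.random.rand(npart * num_real)   # flat buffer, as np.fromfile returns

    x = rdata[0::num_real]
    y = rdata[1::num_real]
    z = rdata[2::num_real]
    mask = (x < 0.5) & (y < 0.5) & (z < 0.5)   # stand-in for the selector

    ind = 3                                    # index of the requested real field
    data = rdata[ind::num_real]
    print(data[mask])
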

https://bitbucket.org/yt_analysis/yt/commits/04adaff1aecd/
Changeset:   04adaff1aecd
Branch:      yt
User:        atmyers
Date:        2017-01-22 19:40:35+00:00
Summary:     Adding a note to the loading data section about BoxLib particle data.
Affected #:  1 file

diff -r 1123aaf88f78abf0a3f589547fe682b46bfde190 -r 04adaff1aecd5e9e915f41124b05f6affe54aab4 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -274,13 +274,13 @@
 BoxLib Data
 -----------
 
-yt has been tested with BoxLib data generated by Orion, Nyx, Maestro and
-Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
+yt has been tested with BoxLib data generated by Orion, Nyx, Maestro, Castro, 
+and WarpX. Currently it is cared for by a combination of Andrew Myers, Chris
 Malone, Matthew Turk, and Mike Zingale.
 
 To load a BoxLib dataset, you can use the ``yt.load`` command on
 the plotfile directory name.  In general, you must also have the
-``inputs`` file in the base directory, but Maestro and Castro will get
+``inputs`` file in the base directory, but Maestro, Castro, and WarpX will get
 all the necessary parameter information from the ``job_info`` file in
 the plotfile directory.  For instance, if you were in a
 directory with the following files:
@@ -308,7 +308,7 @@
    import yt
    ds = yt.load("pltgmlcs5600")
 
-For Maestro and Castro, you would not need the ``inputs`` file, and you
+For Maestro, Castro, and WarpX, you would not need the ``inputs`` file, and you
 would have a ``job_info`` file in the plotfile directory.
 
 .. rubric:: Caveats
@@ -316,7 +316,9 @@
 * yt does not read the Maestro base state (although you can have Maestro
   map it to a full Cartesian state variable before writing the plotfile
   to get around this).  E-mail the dev list if you need this support.
-* yt does not know about particles in Maestro.
+* yt supports BoxLib particle data stored in the standard format used 
+  by Nyx and WarpX. It currently does not support the ASCII particle
+  data used by Maestro and Castro.
 * For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
   depending on the value of the ``use_tfromp`` runtime parameter.
 * For Maestro, some velocity fields like ``velocity_magnitude`` or

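As the updated note says, a WarpX plotfile loads like any other BoxLib output,
with parameters coming from the warpx_job_info file inside the plotfile
directory rather than an "inputs" file. A quick usage sketch (the path is
hypothetical):

    import yt

    ds = yt.load("PlasmaAcceleration/plt00030")
    print(ds.particle_types)  # e.g. ('particle0', 'particle1')
    print(ds.field_list)      # mesh and particle fields together
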

https://bitbucket.org/yt_analysis/yt/commits/ba21c0e1ea0b/
Changeset:   ba21c0e1ea0b
Branch:      yt
User:        atmyers
Date:        2017-01-23 01:55:27+00:00
Summary:     Setting up answer tests for Nyx and WarpX datasets.
Affected #:  3 files

diff -r 04adaff1aecd5e9e915f41124b05f6affe54aab4 -r ba21c0e1ea0b6d3bbe6a65419d441a7f6dfe0271 yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -20,12 +20,18 @@
       OrionHierarchy, \
       OrionDataset, \
       CastroDataset, \
-      MaestroDataset
+      MaestroDataset, \
+      NyxDataset, \
+      NyxHierarchy, \
+      WarpXDataset, \
+      WarpXHierarchy
 
 from .fields import \
       BoxlibFieldInfo, \
       MaestroFieldInfo, \
-      CastroFieldInfo
+      CastroFieldInfo, \
+      NyxFieldInfo, \
+      WarpXFieldInfo
 
 from .io import \
       IOHandlerBoxlib

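With the expanded api above, the new classes can be imported straight from the
frontend. A one-line smoke test (requires a yt build that includes this
changeset):

    from yt.frontends.boxlib.api import NyxDataset, WarpXDataset, WarpXFieldInfo
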
diff -r 04adaff1aecd5e9e915f41124b05f6affe54aab4 -r ba21c0e1ea0b6d3bbe6a65419d441a7f6dfe0271 yt/frontends/boxlib/tests/test_boxlib.py
--- /dev/null
+++ b/yt/frontends/boxlib/tests/test_boxlib.py
@@ -0,0 +1,183 @@
+"""
+Boxlib frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.boxlib.api import \
+    OrionDataset, \
+    NyxDataset, \
+    WarpXDataset
+import numpy as np    
+
+# We don't do anything needing ghost zone generation right now, because these
+# are non-periodic datasets.
+_orion_fields = ("temperature", "density", "velocity_magnitude")
+_nyx_fields = ("Ne", "Temp", "particle_mass_density")
+_warpx_fields = ("Ex", "By", "jz")
+
+radadvect = "RadAdvect/plt00000"
+@requires_ds(radadvect)
+def test_radadvect():
+    ds = data_dir_load(radadvect)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radadvect.__name__ = test.description
+        yield test
+
+rt = "RadTube/plt00500"
+@requires_ds(rt)
+def test_radtube():
+    ds = data_dir_load(rt)
+    yield assert_equal, str(ds), "plt00500"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radtube.__name__ = test.description
+        yield test
+
+star = "StarParticles/plrd01000"
+@requires_ds(star)
+def test_star():
+    ds = data_dir_load(star)
+    yield assert_equal, str(ds), "plrd01000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_star.__name__ = test.description
+        yield test
+
+LyA = "Nyx_LyA/plt00000"
+@requires_ds(LyA)
+def test_LyA():
+    ds = data_dir_load(LyA)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _nyx_fields,
+                                input_center="c",
+                                input_weight="Ne"):
+        test_LyA.__name__ = test.description
+        yield test
+
+@requires_ds(LyA)
+def test_nyx_particle_io():
+    ds = data_dir_load(LyA)
+
+    grid = ds.index.grids[0]
+    npart_grid_0 = 7908  # read directly from the header
+    assert_equal(grid['particle_position_x'].size, npart_grid_0)
+    assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)
+
+    ad = ds.all_data()
+    npart = 32768  # read directly from the header
+    assert_equal(ad['particle_velocity_x'].size, npart)
+    assert_equal(ad['DM', 'particle_velocity_y'].size, npart)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart)
+
+    assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))
+
+    left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+langmuir = "LangmuirWave/plt00020"
+@requires_ds(langmuir)
+def test_langmuir():
+    ds = data_dir_load(langmuir)
+    yield assert_equal, str(ds), "plt00020"
+    for test in small_patch_amr(ds, _warpx_fields, 
+                                input_center="c",
+                                input_weight="Ex"):
+        test_langmuir.__name__ = test.description
+        yield test
+
+plasma = "PlasmaAcceleration/plt00030"
+@requires_ds(plasma)
+def test_plasma():
+    ds = data_dir_load(plasma)
+    yield assert_equal, str(ds), "plt00030"
+    for test in small_patch_amr(ds, _warpx_fields,
+                                input_center="c",
+                                input_weight="Ex"):
+        test_plasma.__name__ = test.description
+        yield test
+
+@requires_ds(plasma)
+def test_warpx_particle_io():
+    ds = data_dir_load(plasma)
+    grid = ds.index.grids[0]
+
+    # read directly from the header
+    npart0_grid_0 = 344  
+    npart1_grid_0 = 69632
+
+    assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)
+    assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)
+
+    # read directly from the header
+    npart0 = 1360  
+    npart1 = 802816  
+    ad = ds.all_data()
+    assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)
+    assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)
+
+    assert(np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0]))
+    assert(np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0]))
+
+    left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')
+    right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+@requires_file(rt)
+def test_OrionDataset():
+    assert isinstance(data_dir_load(rt), OrionDataset)
+
+@requires_file(LyA)
+def test_NyxDataset():
+    assert isinstance(data_dir_load(LyA), NyxDataset)
+
+@requires_file(plasma)
+def test_WarpXDataset():
+    assert isinstance(data_dir_load(plasma), WarpXDataset)
+
+@requires_file(rt)
+def test_units_override():
+    for test in units_override_check(rt):
+        yield test

diff -r 04adaff1aecd5e9e915f41124b05f6affe54aab4 -r ba21c0e1ea0b6d3bbe6a65419d441a7f6dfe0271 yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Orion frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import \
-    assert_equal, \
-    requires_file, \
-    units_override_check
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    small_patch_amr, \
-    data_dir_load
-from yt.frontends.boxlib.api import OrionDataset
-
-# We don't do anything needing ghost zone generation right now, because these
-# are non-periodic datasets.
-_fields = ("temperature", "density", "velocity_magnitude")
-
-radadvect = "RadAdvect/plt00000"
-@requires_ds(radadvect)
-def test_radadvect():
-    ds = data_dir_load(radadvect)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _fields):
-        test_radadvect.__name__ = test.description
-        yield test
-
-rt = "RadTube/plt00500"
-@requires_ds(rt)
-def test_radtube():
-    ds = data_dir_load(rt)
-    yield assert_equal, str(ds), "plt00500"
-    for test in small_patch_amr(ds, _fields):
-        test_radtube.__name__ = test.description
-        yield test
-
-star = "StarParticles/plrd01000"
-@requires_ds(star)
-def test_star():
-    ds = data_dir_load(star)
-    yield assert_equal, str(ds), "plrd01000"
-    for test in small_patch_amr(ds, _fields):
-        test_star.__name__ = test.description
-        yield test
-
-@requires_file(rt)
-def test_OrionDataset():
-    assert isinstance(data_dir_load(rt), OrionDataset)
-
-@requires_file(rt)
-def test_units_override():
-    for test in units_override_check(rt):
-        yield test
-


https://bitbucket.org/yt_analysis/yt/commits/48e3a2fa23f6/
Changeset:   48e3a2fa23f6
Branch:      yt
User:        atmyers
Date:        2017-01-23 01:55:50+00:00
Summary:     Rename test_boxlib to test_outputs.
Affected #:  2 files

diff -r ba21c0e1ea0b6d3bbe6a65419d441a7f6dfe0271 -r 48e3a2fa23f6ef21b37e17c6f54ae5227b70592d yt/frontends/boxlib/tests/test_boxlib.py
--- a/yt/frontends/boxlib/tests/test_boxlib.py
+++ /dev/null
@@ -1,183 +0,0 @@
-"""
-Boxlib frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2017, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import \
-    assert_equal, \
-    requires_file, \
-    units_override_check
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    small_patch_amr, \
-    data_dir_load
-from yt.frontends.boxlib.api import \
-    OrionDataset, \
-    NyxDataset, \
-    WarpXDataset
-import numpy as np    
-
-# We don't do anything needing ghost zone generation right now, because these
-# are non-periodic datasets.
-_orion_fields = ("temperature", "density", "velocity_magnitude")
-_nyx_fields = ("Ne", "Temp", "particle_mass_density")
-_warpx_fields = ("Ex", "By", "jz")
-
-radadvect = "RadAdvect/plt00000"
-@requires_ds(radadvect)
-def test_radadvect():
-    ds = data_dir_load(radadvect)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_radadvect.__name__ = test.description
-        yield test
-
-rt = "RadTube/plt00500"
-@requires_ds(rt)
-def test_radtube():
-    ds = data_dir_load(rt)
-    yield assert_equal, str(ds), "plt00500"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_radtube.__name__ = test.description
-        yield test
-
-star = "StarParticles/plrd01000"
-@requires_ds(star)
-def test_star():
-    ds = data_dir_load(star)
-    yield assert_equal, str(ds), "plrd01000"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_star.__name__ = test.description
-        yield test
-
-LyA = "Nyx_LyA/plt00000"
-@requires_ds(LyA)
-def test_LyA():
-    ds = data_dir_load(LyA)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _nyx_fields,
-                                input_center="c",
-                                input_weight="Ne"):
-        test_LyA.__name__ = test.description
-        yield test
-
-@requires_ds(LyA)
-def test_nyx_particle_io():
-    ds = data_dir_load(LyA)
-
-    grid = ds.index.grids[0]
-    npart_grid_0 = 7908  # read directly from the header
-    assert_equal(grid['particle_position_x'].size, npart_grid_0)
-    assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)
-    assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)
-
-    ad = ds.all_data()
-    npart = 32768  # read directly from the header
-    assert_equal(ad['particle_velocity_x'].size, npart)
-    assert_equal(ad['DM', 'particle_velocity_y'].size, npart)
-    assert_equal(ad['all', 'particle_velocity_z'].size, npart)
-
-    assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))
-
-    left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')
-    right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')
-    center = 0.5*(left_edge + right_edge)
-                   
-    reg = ds.region(center, left_edge, right_edge)
-
-    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
-                                 reg['particle_position_x'] >= left_edge[0])))
-
-    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
-                                 reg['particle_position_y'] >= left_edge[1])))
-
-    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
-                                 reg['particle_position_z'] >= left_edge[2])))
-
-langmuir = "LangmuirWave/plt00020"
-@requires_ds(langmuir)
-def test_langmuir():
-    ds = data_dir_load(langmuir)
-    yield assert_equal, str(ds), "plt00020"
-    for test in small_patch_amr(ds, _warpx_fields, 
-                                input_center="c",
-                                input_weight="Ex"):
-        test_langmuir.__name__ = test.description
-        yield test
-
-plasma = "PlasmaAcceleration/plt00030"
-@requires_ds(plasma)
-def test_plasma():
-    ds = data_dir_load(plasma)
-    yield assert_equal, str(ds), "plt00030"
-    for test in small_patch_amr(ds, _warpx_fields,
-                                input_center="c",
-                                input_weight="Ex"):
-        test_plasma.__name__ = test.description
-        yield test
-
-@requires_ds(plasma)
-def test_warpx_particle_io():
-    ds = data_dir_load(plasma)
-    grid = ds.index.grids[0]
-
-    # read directly from the header
-    npart0_grid_0 = 344  
-    npart1_grid_0 = 69632
-
-    assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)
-    assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)
-    assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)
-
-    # read directly from the header
-    npart0 = 1360  
-    npart1 = 802816  
-    ad = ds.all_data()
-    assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)
-    assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)
-    assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)
-
-    assert(np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0]))
-    assert(np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0]))
-
-    left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')
-    right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')
-    center = 0.5*(left_edge + right_edge)
-                   
-    reg = ds.region(center, left_edge, right_edge)
-
-    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
-                                 reg['particle_position_x'] >= left_edge[0])))
-
-    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
-                                 reg['particle_position_y'] >= left_edge[1])))
-
-    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
-                                 reg['particle_position_z'] >= left_edge[2])))
-
-@requires_file(rt)
-def test_OrionDataset():
-    assert isinstance(data_dir_load(rt), OrionDataset)
-
-@requires_file(LyA)
-def test_NyxDataset():
-    assert isinstance(data_dir_load(LyA), NyxDataset)
-
-@requires_file(plasma)
-def test_WarpXDataset():
-    assert isinstance(data_dir_load(plasma), WarpXDataset)
-
-@requires_file(rt)
-def test_units_override():
-    for test in units_override_check(rt):
-        yield test

diff -r ba21c0e1ea0b6d3bbe6a65419d441a7f6dfe0271 -r 48e3a2fa23f6ef21b37e17c6f54ae5227b70592d yt/frontends/boxlib/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -0,0 +1,183 @@
+"""
+Boxlib frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.boxlib.api import \
+    OrionDataset, \
+    NyxDataset, \
+    WarpXDataset
+import numpy as np    
+
+# We don't do anything needing ghost zone generation right now, because these
+# are non-periodic datasets.
+_orion_fields = ("temperature", "density", "velocity_magnitude")
+_nyx_fields = ("Ne", "Temp", "particle_mass_density")
+_warpx_fields = ("Ex", "By", "jz")
+
+radadvect = "RadAdvect/plt00000"
+@requires_ds(radadvect)
+def test_radadvect():
+    ds = data_dir_load(radadvect)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radadvect.__name__ = test.description
+        yield test
+
+rt = "RadTube/plt00500"
+@requires_ds(rt)
+def test_radtube():
+    ds = data_dir_load(rt)
+    yield assert_equal, str(ds), "plt00500"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radtube.__name__ = test.description
+        yield test
+
+star = "StarParticles/plrd01000"
+@requires_ds(star)
+def test_star():
+    ds = data_dir_load(star)
+    yield assert_equal, str(ds), "plrd01000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_star.__name__ = test.description
+        yield test
+
+LyA = "Nyx_LyA/plt00000"
+@requires_ds(LyA)
+def test_LyA():
+    ds = data_dir_load(LyA)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _nyx_fields,
+                                input_center="c",
+                                input_weight="Ne"):
+        test_LyA.__name__ = test.description
+        yield test
+
+@requires_ds(LyA)
+def test_nyx_particle_io():
+    ds = data_dir_load(LyA)
+
+    grid = ds.index.grids[0]
+    npart_grid_0 = 7908  # read directly from the header
+    assert_equal(grid['particle_position_x'].size, npart_grid_0)
+    assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)
+
+    ad = ds.all_data()
+    npart = 32768  # read directly from the header
+    assert_equal(ad['particle_velocity_x'].size, npart)
+    assert_equal(ad['DM', 'particle_velocity_y'].size, npart)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart)
+
+    assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))
+
+    left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+langmuir = "LangmuirWave/plt00020"
+@requires_ds(langmuir)
+def test_langmuir():
+    ds = data_dir_load(langmuir)
+    yield assert_equal, str(ds), "plt00020"
+    for test in small_patch_amr(ds, _warpx_fields, 
+                                input_center="c",
+                                input_weight="Ex"):
+        test_langmuir.__name__ = test.description
+        yield test
+
+plasma = "PlasmaAcceleration/plt00030"
+@requires_ds(plasma)
+def test_plasma():
+    ds = data_dir_load(plasma)
+    yield assert_equal, str(ds), "plt00030"
+    for test in small_patch_amr(ds, _warpx_fields,
+                                input_center="c",
+                                input_weight="Ex"):
+        test_plasma.__name__ = test.description
+        yield test
+
+@requires_ds(plasma)
+def test_warpx_particle_io():
+    ds = data_dir_load(plasma)
+    grid = ds.index.grids[0]
+
+    # read directly from the header
+    npart0_grid_0 = 344  
+    npart1_grid_0 = 69632
+
+    assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)
+    assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)
+
+    # read directly from the header
+    npart0 = 1360  
+    npart1 = 802816  
+    ad = ds.all_data()
+    assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)
+    assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)
+
+    assert(np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0]))
+    assert(np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0]))
+
+    left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')
+    right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+@requires_file(rt)
+def test_OrionDataset():
+    assert isinstance(data_dir_load(rt), OrionDataset)
+
+@requires_file(LyA)
+def test_NyxDataset():
+    assert isinstance(data_dir_load(LyA), NyxDataset)
+
+@requires_file(plasma)
+def test_WarpXDataset():
+    assert isinstance(data_dir_load(plasma), WarpXDataset)
+
+@requires_file(rt)
+def test_units_override():
+    for test in units_override_check(rt):
+        yield test


https://bitbucket.org/yt_analysis/yt/commits/d5b9c9f46ca2/
Changeset:   d5b9c9f46ca2
Branch:      yt
User:        atmyers
Date:        2017-01-23 03:10:45+00:00
Summary:     Guess I can't change this file name.
Affected #:  2 files

diff -r 48e3a2fa23f6ef21b37e17c6f54ae5227b70592d -r d5b9c9f46ca2b8b6ba3dfa8c40e5c3d31a204a06 yt/frontends/boxlib/tests/test_orion.py
--- /dev/null
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -0,0 +1,183 @@
+"""
+Boxlib frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.boxlib.api import \
+    OrionDataset, \
+    NyxDataset, \
+    WarpXDataset
+import numpy as np    
+
+# We don't do anything needing ghost zone generation right now, because these
+# are non-periodic datasets.
+_orion_fields = ("temperature", "density", "velocity_magnitude")
+_nyx_fields = ("Ne", "Temp", "particle_mass_density")
+_warpx_fields = ("Ex", "By", "jz")
+
+radadvect = "RadAdvect/plt00000"
+@requires_ds(radadvect)
+def test_radadvect():
+    ds = data_dir_load(radadvect)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radadvect.__name__ = test.description
+        yield test
+
+rt = "RadTube/plt00500"
+@requires_ds(rt)
+def test_radtube():
+    ds = data_dir_load(rt)
+    yield assert_equal, str(ds), "plt00500"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radtube.__name__ = test.description
+        yield test
+
+star = "StarParticles/plrd01000"
+@requires_ds(star)
+def test_star():
+    ds = data_dir_load(star)
+    yield assert_equal, str(ds), "plrd01000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_star.__name__ = test.description
+        yield test
+
+LyA = "Nyx_LyA/plt00000"
+@requires_ds(LyA)
+def test_LyA():
+    ds = data_dir_load(LyA)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _nyx_fields,
+                                input_center="c",
+                                input_weight="Ne"):
+        test_LyA.__name__ = test.description
+        yield test
+
+@requires_ds(LyA)
+def test_nyx_particle_io():
+    ds = data_dir_load(LyA)
+
+    grid = ds.index.grids[0]
+    npart_grid_0 = 7908  # read directly from the header
+    assert_equal(grid['particle_position_x'].size, npart_grid_0)
+    assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)
+
+    ad = ds.all_data()
+    npart = 32768  # read directly from the header
+    assert_equal(ad['particle_velocity_x'].size, npart)
+    assert_equal(ad['DM', 'particle_velocity_y'].size, npart)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart)
+
+    assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))
+
+    left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+langmuir = "LangmuirWave/plt00020"
+@requires_ds(langmuir)
+def test_langmuir():
+    ds = data_dir_load(langmuir)
+    yield assert_equal, str(ds), "plt00020"
+    for test in small_patch_amr(ds, _warpx_fields, 
+                                input_center="c",
+                                input_weight="Ex"):
+        test_langmuir.__name__ = test.description
+        yield test
+
+plasma = "PlasmaAcceleration/plt00030"
+@requires_ds(plasma)
+def test_plasma():
+    ds = data_dir_load(plasma)
+    yield assert_equal, str(ds), "plt00030"
+    for test in small_patch_amr(ds, _warpx_fields,
+                                input_center="c",
+                                input_weight="Ex"):
+        test_plasma.__name__ = test.description
+        yield test
+
+@requires_ds(plasma)
+def test_warpx_particle_io():
+    ds = data_dir_load(plasma)
+    grid = ds.index.grids[0]
+
+    # read directly from the header
+    npart0_grid_0 = 344  
+    npart1_grid_0 = 69632
+
+    assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)
+    assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)
+
+    # read directly from the header
+    npart0 = 1360  
+    npart1 = 802816  
+    ad = ds.all_data()
+    assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)
+    assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)
+
+    assert(np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0]))
+    assert(np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0]))
+
+    left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')
+    right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+@requires_file(rt)
+def test_OrionDataset():
+    assert isinstance(data_dir_load(rt), OrionDataset)
+
+@requires_file(LyA)
+def test_NyxDataset():
+    assert isinstance(data_dir_load(LyA), NyxDataset)
+
+@requires_file(plasma)
+def test_WarpXDataset():
+    assert isinstance(data_dir_load(plasma), WarpXDataset)
+
+@requires_file(rt)
+def test_units_override():
+    for test in units_override_check(rt):
+        yield test

diff -r 48e3a2fa23f6ef21b37e17c6f54ae5227b70592d -r d5b9c9f46ca2b8b6ba3dfa8c40e5c3d31a204a06 yt/frontends/boxlib/tests/test_outputs.py
--- a/yt/frontends/boxlib/tests/test_outputs.py
+++ /dev/null
@@ -1,183 +0,0 @@
-"""
-Boxlib frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2017, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import \
-    assert_equal, \
-    requires_file, \
-    units_override_check
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    small_patch_amr, \
-    data_dir_load
-from yt.frontends.boxlib.api import \
-    OrionDataset, \
-    NyxDataset, \
-    WarpXDataset
-import numpy as np    
-
-# We don't do anything needing ghost zone generation right now, because these
-# are non-periodic datasets.
-_orion_fields = ("temperature", "density", "velocity_magnitude")
-_nyx_fields = ("Ne", "Temp", "particle_mass_density")
-_warpx_fields = ("Ex", "By", "jz")
-
-radadvect = "RadAdvect/plt00000"
-@requires_ds(radadvect)
-def test_radadvect():
-    ds = data_dir_load(radadvect)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_radadvect.__name__ = test.description
-        yield test
-
-rt = "RadTube/plt00500"
-@requires_ds(rt)
-def test_radtube():
-    ds = data_dir_load(rt)
-    yield assert_equal, str(ds), "plt00500"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_radtube.__name__ = test.description
-        yield test
-
-star = "StarParticles/plrd01000"
-@requires_ds(star)
-def test_star():
-    ds = data_dir_load(star)
-    yield assert_equal, str(ds), "plrd01000"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_star.__name__ = test.description
-        yield test
-
-LyA = "Nyx_LyA/plt00000"
-@requires_ds(LyA)
-def test_LyA():
-    ds = data_dir_load(LyA)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _nyx_fields,
-                                input_center="c",
-                                input_weight="Ne"):
-        test_LyA.__name__ = test.description
-        yield test
-
-@requires_ds(LyA)
-def test_nyx_particle_io():
-    ds = data_dir_load(LyA)
-
-    grid = ds.index.grids[0]
-    npart_grid_0 = 7908  # read directly from the header
-    assert_equal(grid['particle_position_x'].size, npart_grid_0)
-    assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)
-    assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)
-
-    ad = ds.all_data()
-    npart = 32768  # read directly from the header
-    assert_equal(ad['particle_velocity_x'].size, npart)
-    assert_equal(ad['DM', 'particle_velocity_y'].size, npart)
-    assert_equal(ad['all', 'particle_velocity_z'].size, npart)
-
-    assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))
-
-    left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')
-    right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')
-    center = 0.5*(left_edge + right_edge)
-                   
-    reg = ds.region(center, left_edge, right_edge)
-
-    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
-                                 reg['particle_position_x'] >= left_edge[0])))
-
-    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
-                                 reg['particle_position_y'] >= left_edge[1])))
-
-    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
-                                 reg['particle_position_z'] >= left_edge[2])))
-
-langmuir = "LangmuirWave/plt00020"
-@requires_ds(langmuir)
-def test_langmuir():
-    ds = data_dir_load(langmuir)
-    yield assert_equal, str(ds), "plt00020"
-    for test in small_patch_amr(ds, _warpx_fields, 
-                                input_center="c",
-                                input_weight="Ex"):
-        test_langmuir.__name__ = test.description
-        yield test
-
-plasma = "PlasmaAcceleration/plt00030"
-@requires_ds(plasma)
-def test_plasma():
-    ds = data_dir_load(plasma)
-    yield assert_equal, str(ds), "plt00030"
-    for test in small_patch_amr(ds, _warpx_fields,
-                                input_center="c",
-                                input_weight="Ex"):
-        test_plasma.__name__ = test.description
-        yield test
-
-@requires_ds(plasma)
-def test_warpx_particle_io():
-    ds = data_dir_load(plasma)
-    grid = ds.index.grids[0]
-
-    # read directly from the header
-    npart0_grid_0 = 344  
-    npart1_grid_0 = 69632
-
-    assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)
-    assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)
-    assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)
-
-    # read directly from the header
-    npart0 = 1360  
-    npart1 = 802816  
-    ad = ds.all_data()
-    assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)
-    assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)
-    assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)
-
-    assert(np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0]))
-    assert(np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0]))
-
-    left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')
-    right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')
-    center = 0.5*(left_edge + right_edge)
-                   
-    reg = ds.region(center, left_edge, right_edge)
-
-    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
-                                 reg['particle_position_x'] >= left_edge[0])))
-
-    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
-                                 reg['particle_position_y'] >= left_edge[1])))
-
-    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
-                                 reg['particle_position_z'] >= left_edge[2])))
-
-@requires_file(rt)
-def test_OrionDataset():
-    assert isinstance(data_dir_load(rt), OrionDataset)
-
-@requires_file(LyA)
-def test_NyxDataset():
-    assert isinstance(data_dir_load(LyA), NyxDataset)
-
-@requires_file(plasma)
-def test_WarpXDataset():
-    assert isinstance(data_dir_load(plasma), WarpXDataset)
-
-@requires_file(rt)
-def test_units_override():
-    for test in units_override_check(rt):
-        yield test


https://bitbucket.org/yt_analysis/yt/commits/d0f51b410299/
Changeset:   d0f51b410299
Branch:      yt
User:        atmyers
Date:        2017-01-23 17:40:48+00:00
Summary:     Incrementing the answer test number.
Affected #:  3 files

diff -r d5b9c9f46ca2b8b6ba3dfa8c40e5c3d31a204a06 -r d0f51b4102990a0b6c87753207ad65256dc05ba2 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -62,8 +62,8 @@
     - yt/visualization/volume_rendering/tests/test_mesh_render.py
     - yt/visualization/tests/test_mesh_slices.py:test_tri2
 
-  local_orion_001:
-    - yt/frontends/boxlib/tests/test_orion.py
+  local_boxlib_002:
+    - yt/frontends/boxlib/tests/test_outputs.py
 
   local_ramses_001:
     - yt/frontends/ramses/tests/test_outputs.py

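Bumping the name from local_orion_001 to local_boxlib_002 points the answer
machinery at a fresh set of gold answers for the renamed test module; with the
nose-based framework of this era, that means regenerating local answers with
something like ``nosetests --with-answer-testing --local --local-dir=<answer_dir>
--answer-store yt/frontends/boxlib/tests/test_outputs.py`` (illustrative, not a
verbatim invocation).
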
diff -r d5b9c9f46ca2b8b6ba3dfa8c40e5c3d31a204a06 -r d0f51b4102990a0b6c87753207ad65256dc05ba2 yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ /dev/null
@@ -1,183 +0,0 @@
-"""
-Boxlib frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2017, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import \
-    assert_equal, \
-    requires_file, \
-    units_override_check
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    small_patch_amr, \
-    data_dir_load
-from yt.frontends.boxlib.api import \
-    OrionDataset, \
-    NyxDataset, \
-    WarpXDataset
-import numpy as np    
-
-# We don't do anything needing ghost zone generation right now, because these
-# are non-periodic datasets.
-_orion_fields = ("temperature", "density", "velocity_magnitude")
-_nyx_fields = ("Ne", "Temp", "particle_mass_density")
-_warpx_fields = ("Ex", "By", "jz")
-
-radadvect = "RadAdvect/plt00000"
-@requires_ds(radadvect)
-def test_radadvect():
-    ds = data_dir_load(radadvect)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_radadvect.__name__ = test.description
-        yield test
-
-rt = "RadTube/plt00500"
-@requires_ds(rt)
-def test_radtube():
-    ds = data_dir_load(rt)
-    yield assert_equal, str(ds), "plt00500"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_radtube.__name__ = test.description
-        yield test
-
-star = "StarParticles/plrd01000"
-@requires_ds(star)
-def test_star():
-    ds = data_dir_load(star)
-    yield assert_equal, str(ds), "plrd01000"
-    for test in small_patch_amr(ds, _orion_fields):
-        test_star.__name__ = test.description
-        yield test
-
-LyA = "Nyx_LyA/plt00000"
-@requires_ds(LyA)
-def test_LyA():
-    ds = data_dir_load(LyA)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _nyx_fields,
-                                input_center="c",
-                                input_weight="Ne"):
-        test_LyA.__name__ = test.description
-        yield test
-
-@requires_ds(LyA)
-def test_nyx_particle_io():
-    ds = data_dir_load(LyA)
-
-    grid = ds.index.grids[0]
-    npart_grid_0 = 7908  # read directly from the header
-    assert_equal(grid['particle_position_x'].size, npart_grid_0)
-    assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)
-    assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)
-
-    ad = ds.all_data()
-    npart = 32768  # read directly from the header
-    assert_equal(ad['particle_velocity_x'].size, npart)
-    assert_equal(ad['DM', 'particle_velocity_y'].size, npart)
-    assert_equal(ad['all', 'particle_velocity_z'].size, npart)
-
-    assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))
-
-    left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')
-    right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')
-    center = 0.5*(left_edge + right_edge)
-                   
-    reg = ds.region(center, left_edge, right_edge)
-
-    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
-                                 reg['particle_position_x'] >= left_edge[0])))
-
-    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
-                                 reg['particle_position_y'] >= left_edge[1])))
-
-    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
-                                 reg['particle_position_z'] >= left_edge[2])))
-
-langmuir = "LangmuirWave/plt00020"
-@requires_ds(langmuir)
-def test_langmuir():
-    ds = data_dir_load(langmuir)
-    yield assert_equal, str(ds), "plt00020"
-    for test in small_patch_amr(ds, _warpx_fields, 
-                                input_center="c",
-                                input_weight="Ex"):
-        test_langmuir.__name__ = test.description
-        yield test
-
-plasma = "PlasmaAcceleration/plt00030"
-@requires_ds(plasma)
-def test_plasma():
-    ds = data_dir_load(plasma)
-    yield assert_equal, str(ds), "plt00030"
-    for test in small_patch_amr(ds, _warpx_fields,
-                                input_center="c",
-                                input_weight="Ex"):
-        test_plasma.__name__ = test.description
-        yield test
-
-@requires_ds(plasma)
-def test_warpx_particle_io():
-    ds = data_dir_load(plasma)
-    grid = ds.index.grids[0]
-
-    # read directly from the header
-    npart0_grid_0 = 344  
-    npart1_grid_0 = 69632
-
-    assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)
-    assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)
-    assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)
-
-    # read directly from the header
-    npart0 = 1360  
-    npart1 = 802816  
-    ad = ds.all_data()
-    assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)
-    assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)
-    assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)
-
-    np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0])
-    np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0])
-
-    left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')
-    right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')
-    center = 0.5*(left_edge + right_edge)
-                   
-    reg = ds.region(center, left_edge, right_edge)
-
-    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
-                                 reg['particle_position_x'] >= left_edge[0])))
-
-    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
-                                 reg['particle_position_y'] >= left_edge[1])))
-
-    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
-                                 reg['particle_position_z'] >= left_edge[2])))
-
-@requires_file(rt)
-def test_OrionDataset():
-    assert isinstance(data_dir_load(rt), OrionDataset)
-
-@requires_file(LyA)
-def test_NyxDataset():
-    assert isinstance(data_dir_load(LyA), NyxDataset)
-
-@requires_file(LyA)
-def test_WarpXDataset():
-    assert isinstance(data_dir_load(plasma), WarpXDataset)
-
-@requires_file(rt)
-def test_units_override():
-    for test in units_override_check(rt):
-        yield test

diff -r d5b9c9f46ca2b8b6ba3dfa8c40e5c3d31a204a06 -r d0f51b4102990a0b6c87753207ad65256dc05ba2 yt/frontends/boxlib/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -0,0 +1,183 @@
+"""
+Boxlib frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.boxlib.api import \
+    OrionDataset, \
+    NyxDataset, \
+    WarpXDataset
+import numpy as np    
+
+# We don't do anything needing ghost zone generation right now, because these
+# are non-periodic datasets.
+_orion_fields = ("temperature", "density", "velocity_magnitude")
+_nyx_fields = ("Ne", "Temp", "particle_mass_density")
+_warpx_fields = ("Ex", "By", "jz")
+
+radadvect = "RadAdvect/plt00000"
+@requires_ds(radadvect)
+def test_radadvect():
+    ds = data_dir_load(radadvect)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radadvect.__name__ = test.description
+        yield test
+
+rt = "RadTube/plt00500"
+@requires_ds(rt)
+def test_radtube():
+    ds = data_dir_load(rt)
+    yield assert_equal, str(ds), "plt00500"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radtube.__name__ = test.description
+        yield test
+
+star = "StarParticles/plrd01000"
+@requires_ds(star)
+def test_star():
+    ds = data_dir_load(star)
+    yield assert_equal, str(ds), "plrd01000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_star.__name__ = test.description
+        yield test
+
+LyA = "Nyx_LyA/plt00000"
+@requires_ds(LyA)
+def test_LyA():
+    ds = data_dir_load(LyA)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _nyx_fields,
+                                input_center="c",
+                                input_weight="Ne"):
+        test_LyA.__name__ = test.description
+        yield test
+
+@requires_ds(LyA)
+def test_nyx_particle_io():
+    ds = data_dir_load(LyA)
+
+    grid = ds.index.grids[0]
+    npart_grid_0 = 7908  # read directly from the header
+    assert_equal(grid['particle_position_x'].size, npart_grid_0)
+    assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)
+
+    ad = ds.all_data()
+    npart = 32768  # read directly from the header
+    assert_equal(ad['particle_velocity_x'].size, npart)
+    assert_equal(ad['DM', 'particle_velocity_y'].size, npart)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart)
+
+    assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))
+
+    left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+langmuir = "LangmuirWave/plt00020"
+@requires_ds(langmuir)
+def test_langmuir():
+    ds = data_dir_load(langmuir)
+    yield assert_equal, str(ds), "plt00020"
+    for test in small_patch_amr(ds, _warpx_fields, 
+                                input_center="c",
+                                input_weight="Ex"):
+        test_langmuir.__name__ = test.description
+        yield test
+
+plasma = "PlasmaAcceleration/plt00030"
+@requires_ds(plasma)
+def test_plasma():
+    ds = data_dir_load(plasma)
+    yield assert_equal, str(ds), "plt00030"
+    for test in small_patch_amr(ds, _warpx_fields,
+                                input_center="c",
+                                input_weight="Ex"):
+        test_plasma.__name__ = test.description
+        yield test
+
+@requires_ds(plasma)
+def test_warpx_particle_io():
+    ds = data_dir_load(plasma)
+    grid = ds.index.grids[0]
+
+    # read directly from the header
+    npart0_grid_0 = 344  
+    npart1_grid_0 = 69632
+
+    assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)
+    assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)
+
+    # read directly from the header
+    npart0 = 1360  
+    npart1 = 802816  
+    ad = ds.all_data()
+    assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)
+    assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)
+
+    assert(np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0]))
+    assert(np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0]))
+
+    left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')
+    right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+@requires_file(rt)
+def test_OrionDataset():
+    assert isinstance(data_dir_load(rt), OrionDataset)
+
+@requires_file(LyA)
+def test_NyxDataset():
+    assert isinstance(data_dir_load(LyA), NyxDataset)
+
+@requires_file(plasma)
+def test_WarpXDataset():
+    assert isinstance(data_dir_load(plasma), WarpXDataset)
+
+@requires_file(rt)
+def test_units_override():
+    for test in units_override_check(rt):
+        yield test

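The tests above use nose's generator protocol: each answer test produced by
small_patch_amr is yielded back to the runner, and reassigning the enclosing
function's __name__ to test.description makes every yielded sub-test show up
under a readable label in the report. A minimal sketch of the pattern, with a
placeholder dataset path and field tuple (not taken from the commit):

    from yt.testing import assert_equal
    from yt.utilities.answer_testing.framework import \
        small_patch_amr, data_dir_load

    def test_example():
        ds = data_dir_load("SomeSample/plt00000")   # placeholder path
        # nose runs assert_equal(str(ds), "plt00000") for this yield
        yield assert_equal, str(ds), "plt00000"
        for test in small_patch_amr(ds, ("density",)):
            # relabel so each sub-test is reported under its description
            test_example.__name__ = test.description
            yield test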

https://bitbucket.org/yt_analysis/yt/commits/312f8a740c1c/
Changeset:   312f8a740c1c
Branch:      yt
User:        atmyers
Date:        2017-01-23 20:06:54+00:00
Summary:     using requires_file instead of requires_ds here.
Affected #:  1 file

diff -r d0f51b4102990a0b6c87753207ad65256dc05ba2 -r 312f8a740c1c9c5a72e1d6d480b3bfb67e27bb13 yt/frontends/boxlib/tests/test_outputs.py
--- a/yt/frontends/boxlib/tests/test_outputs.py
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -71,7 +71,7 @@
         test_LyA.__name__ = test.description
         yield test
 
-@requires_ds(LyA)
+@requires_file(LyA)
 def test_nyx_particle_io():
     ds = data_dir_load(LyA)
 
@@ -126,7 +126,7 @@
         test_plasma.__name__ = test.description
         yield test
 
-@requires_ds(plasma)
+@requires_file(plasma)
 def test_warpx_particle_io():
     ds = data_dir_load(plasma)
     grid = ds.index.grids[0]

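The distinction being exploited here: requires_ds registers a test with the
answer-testing machinery and its stored results, while requires_file merely
skips the test when the sample data is absent. Since the particle I/O tests
use plain in-process asserts, the lighter decorator is sufficient. A rough
sketch of the skip-if-absent contract, written from the decorator's observable
behavior rather than yt's actual implementation:

    import os
    from functools import wraps

    def requires_file_sketch(path):
        """Illustrative stand-in for yt.testing.requires_file."""
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                if not os.path.exists(path):
                    return  # sample data missing: quietly skip the test
                return func(*args, **kwargs)
            return wrapper
        return decorator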

https://bitbucket.org/yt_analysis/yt/commits/ebadd50d6e38/
Changeset:   ebadd50d6e38
Branch:      yt
User:        atmyers
Date:        2017-01-24 01:20:17+00:00
Summary:     split boxlib tests into two buckets.
Affected #:  1 file

diff -r 312f8a740c1c9c5a72e1d6d480b3bfb67e27bb13 -r ebadd50d6e384061696e7f1f5e13f0d1d01dd580 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -63,7 +63,20 @@
     - yt/visualization/tests/test_mesh_slices.py:test_tri2
 
   local_boxlib_002:
-    - yt/frontends/boxlib/tests/test_outputs.py
+    - yt/frontends/boxlib/tests/test_outputs.py:test_radadvect
+    - yt/frontends/boxlib/tests/test_outputs.py:test_radtube
+    - yt/frontends/boxlib/tests/test_outputs.py:test_star
+    - yt/frontends/boxlib/tests/test_outputs.py:test_OrionDataset
+    - yt/frontends/boxlib/tests/test_outputs.py:test_units_override
+
+  local_boxlib_particles_001:
+    - yt/frontends/boxlib/tests/test_outputs.py:test_LyA
+    - yt/frontends/boxlib/tests/test_outputs.py:test_nyx_particle_io
+    - yt/frontends/boxlib/tests/test_outputs.py:test_langmuir
+    - yt/frontends/boxlib/tests/test_outputs.py:test_plasma
+    - yt/frontends/boxlib/tests/test_outputs.py:test_warpx_particle_io
+    - yt/frontends/boxlib/tests/test_outputs.py:test_NyxDataset
+    - yt/frontends/boxlib/tests/test_outputs.py:test_WarpXDataset
 
   local_ramses_001:
     - yt/frontends/ramses/tests/test_outputs.py

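Splitting the bucket means the particle answer results are stored and
versioned independently of the mesh results, so bumping one suite does not
invalidate the other. A small sketch of how a runner could expand the
per-bucket selectors, assuming the file's top-level answer_tests mapping
implied by the two-space indentation above (PyYAML assumed):

    import yaml

    with open("tests/tests.yaml") as f:
        config = yaml.safe_load(f)

    for bucket, selectors in sorted(config["answer_tests"].items()):
        # each selector uses nose's path/to/file.py:test_name syntax
        print("%s: %d tests" % (bucket, len(selectors)))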

https://bitbucket.org/yt_analysis/yt/commits/5c3af8da1dd8/
Changeset:   5c3af8da1dd8
Branch:      yt
User:        ngoldbaum
Date:        2017-01-24 16:14:53+00:00
Summary:     Merged in atmyers/yt (pull request #2497)

Boxlib particle support
Affected #:  9 files

diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -274,13 +274,13 @@
 BoxLib Data
 -----------
 
-yt has been tested with BoxLib data generated by Orion, Nyx, Maestro and
-Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
+yt has been tested with BoxLib data generated by Orion, Nyx, Maestro, Castro, 
+and WarpX. Currently it is cared for by a combination of Andrew Myers, Chris
 Malone, Matthew Turk, and Mike Zingale.
 
 To load a BoxLib dataset, you can use the ``yt.load`` command on
 the plotfile directory name.  In general, you must also have the
-``inputs`` file in the base directory, but Maestro and Castro will get
+``inputs`` file in the base directory, but Maestro, Castro, and WarpX will get
 all the necessary parameter information from the ``job_info`` file in
 the plotfile directory.  For instance, if you were in a
 directory with the following files:
@@ -308,7 +308,7 @@
    import yt
    ds = yt.load("pltgmlcs5600")
 
-For Maestro and Castro, you would not need the ``inputs`` file, and you
+For Maestro, Castro, and WarpX, you would not need the ``inputs`` file, and you
 would have a ``job_info`` file in the plotfile directory.
 
 .. rubric:: Caveats
@@ -316,7 +316,9 @@
 * yt does not read the Maestro base state (although you can have Maestro
   map it to a full Cartesian state variable before writing the plotfile
   to get around this).  E-mail the dev list if you need this support.
-* yt does not know about particles in Maestro.
+* yt supports BoxLib particle data stored in the standard format used 
+  by Nyx and WarpX. It currently does not support the ASCII particle
+  data used by Maestro and Castro.
 * For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
   depending on the value of the ``use_tfromp`` runtime parameter.
 * For Maestro, some velocity fields like ``velocity_magnitude`` or

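Per the documentation change above, a plotfile that carries particle data now
exposes per-species particle fields directly after loading; a quick hedged
example using the Nyx sample dataset referenced elsewhere in this merge:

    import yt

    ds = yt.load("Nyx_LyA/plt00000")   # plotfile directory with a DM/ subdirectory
    print(ds.particle_types)           # ('DM',), per the NyxDataset change below
    ad = ds.all_data()
    print(ad["DM", "particle_position_x"].size)
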
diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -46,7 +46,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | MOAB                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| Nyx                   |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
+| Nyx                   |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | openPMD               |     Y      |     Y     |      N     |   Y   |    Y     |    Y     |     N      | Partial  |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
@@ -62,6 +62,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Tipsy                 |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| WarpX                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 
 .. [#f1] one-dimensional base-state not read in currently.
 .. [#f2] These handle mesh fields using an in-memory octree that has not been parallelized.

diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -68,8 +68,21 @@
     - yt/visualization/tests/test_mesh_slices.py:test_tri2
     - yt/visualization/tests/test_mesh_slices.py:test_multi_region
 
-  local_orion_001:
-    - yt/frontends/boxlib/tests/test_orion.py
+  local_boxlib_002:
+    - yt/frontends/boxlib/tests/test_outputs.py:test_radadvect
+    - yt/frontends/boxlib/tests/test_outputs.py:test_radtube
+    - yt/frontends/boxlib/tests/test_outputs.py:test_star
+    - yt/frontends/boxlib/tests/test_outputs.py:test_OrionDataset
+    - yt/frontends/boxlib/tests/test_outputs.py:test_units_override
+
+  local_boxlib_particles_001:
+    - yt/frontends/boxlib/tests/test_outputs.py:test_LyA
+    - yt/frontends/boxlib/tests/test_outputs.py:test_nyx_particle_io
+    - yt/frontends/boxlib/tests/test_outputs.py:test_langmuir
+    - yt/frontends/boxlib/tests/test_outputs.py:test_plasma
+    - yt/frontends/boxlib/tests/test_outputs.py:test_warpx_particle_io
+    - yt/frontends/boxlib/tests/test_outputs.py:test_NyxDataset
+    - yt/frontends/boxlib/tests/test_outputs.py:test_WarpXDataset
 
   local_ramses_001:
     - yt/frontends/ramses/tests/test_outputs.py

diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -20,12 +20,18 @@
       OrionHierarchy, \
       OrionDataset, \
       CastroDataset, \
-      MaestroDataset
+      MaestroDataset, \
+      NyxDataset, \
+      NyxHierarchy, \
+      WarpXDataset, \
+      WarpXHierarchy
 
 from .fields import \
       BoxlibFieldInfo, \
       MaestroFieldInfo, \
-      CastroFieldInfo
+      CastroFieldInfo, \
+      NyxFieldInfo, \
+      WarpXFieldInfo
 
 from .io import \
       IOHandlerBoxlib

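With the re-exports above, the new dataset and field-info classes are
reachable from the frontend's public module, e.g.:

    from yt.frontends.boxlib.api import WarpXDataset, WarpXFieldInfo, NyxDataset
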
diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -16,19 +16,21 @@
 import inspect
 import os
 import re
+from collections import namedtuple
 
 from stat import ST_CTIME
 
 import numpy as np
+import glob
 
 from yt.funcs import \
     ensure_tuple, \
     mylog, \
     setdefaultattr
 from yt.data_objects.grid_patch import AMRGridPatch
-from yt.extern.six.moves import zip as izip
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
+from yt.units import YTQuantity
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
@@ -39,9 +41,10 @@
 
 from .fields import \
     BoxlibFieldInfo, \
+    NyxFieldInfo, \
     MaestroFieldInfo, \
-    CastroFieldInfo
-
+    CastroFieldInfo, \
+    WarpXFieldInfo
 
 # This is what we use to find scientific notation that might include d's
 # instead of e's.
@@ -74,6 +77,7 @@
         self._base_offset = offset
         self._parent_id = []
         self._children_ids = []
+        self._pdata = {}
 
     def _prepare_grid(self):
         super(BoxlibGrid, self)._prepare_grid()
@@ -142,6 +146,7 @@
         self.dataset_type = dataset_type
         self.header_filename = os.path.join(ds.output_dir, 'Header')
         self.directory = ds.output_dir
+        self.particle_headers = {}
 
         GridIndex.__init__(self, ds, dataset_type)
         self._cache_endianness(self.grids[-1])
@@ -360,6 +365,32 @@
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.dataset)
 
+    def _read_particles(self, directory_name, is_checkpoint, extra_field_names=None):
+
+        self.particle_headers[directory_name] = BoxLibParticleHeader(self.ds,
+                                                                     directory_name,
+                                                                     is_checkpoint,
+                                                                     extra_field_names)
+
+        base_particle_fn = self.ds.output_dir + '/' + directory_name + "/Level_%d/DATA_%.4d"
+
+        gid = 0
+        for lev, data in self.particle_headers[directory_name].data_map.items():
+            for pdf in data.values():
+                pdict = self.grids[gid]._pdata
+                pdict[directory_name] = {}
+                pdict[directory_name]["particle_filename"] = base_particle_fn % \
+                                                             (lev, pdf.file_number)
+                pdict[directory_name]["offset"] = pdf.offset
+                pdict[directory_name]["NumberOfParticles"] = pdf.num_particles
+                self.grid_particle_count[gid] += pdf.num_particles
+                self.grids[gid].NumberOfParticles += pdf.num_particles
+                gid += 1
+
+        # add particle fields to field_list
+        pfield_list = self.particle_headers[directory_name].known_fields
+        self.field_list.extend(pfield_list)
+
 
 class BoxlibDataset(Dataset):
     """
@@ -690,7 +721,7 @@
 
     def _read_particles(self):
         """
-        reads in particles and assigns them to grids. Will search for
+        Reads in particles and assigns them to grids. Will search for
         Star particles, then sink particles if no star particle file
         is found, and finally will simply note that no particles are
         found if neither works. To add a new Orion particle type,
@@ -778,6 +809,9 @@
         if os.path.exists(jobinfo_filename):
             return False
         # Now we check for all the others
+        warpx_jobinfo_filename = os.path.join(output_dir, "warpx_job_info")
+        if os.path.exists(warpx_jobinfo_filename):
+            return False
         lines = open(inputs_filename).readlines()
         if any(("castro." in line for line in lines)): return False
         if any(("nyx." in line for line in lines)): return False
@@ -910,47 +944,23 @@
 
     def __init__(self, ds, dataset_type='nyx_native'):
         super(NyxHierarchy, self).__init__(ds, dataset_type)
-        self._read_particle_header()
-
-    def _read_particle_header(self):
-        if not self.ds.parameters["particles"]:
-            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
-            return
-        for fn in ['particle_position_%s' % ax for ax in 'xyz'] + \
-                  ['particle_mass'] +  \
-                  ['particle_velocity_%s' % ax for ax in 'xyz']:
-            self.field_list.append(("io", fn))
-        header = open(os.path.join(self.ds.output_dir, "DM", "Header"))
-        version = header.readline()  # NOQA
-        ndim = header.readline()  # NOQA
-        nfields = header.readline()  # NOQA
-        ntotalpart = int(header.readline())  # NOQA
-        nextid = header.readline()  # NOQA
-        maxlevel = int(header.readline())  # NOQA
 
-        # Skip over how many grids on each level; this is degenerate
-        for i in range(maxlevel + 1):
-            header.readline()
+        # extra beyond the base real fields that all Boxlib
+        # particles have, i.e. the xyz positions
+        nyx_extra_real_fields = ['particle_mass',
+                                 'particle_velocity_x',
+                                 'particle_velocity_y',
+                                 'particle_velocity_z']
 
-        grid_info = np.fromiter((int(i) for line in header.readlines()
-                                 for i in line.split()),
-                                dtype='int64',
-                                count=3*self.num_grids).reshape((self.num_grids, 3))
-        # we need grid_info in `populate_grid_objects`, so save it to self
+        is_checkpoint = False
 
-        for g, pg in izip(self.grids, grid_info):
-            g.particle_filename = os.path.join(self.ds.output_dir, "DM",
-                                               "Level_%s" % (g.Level),
-                                               "DATA_%04i" % pg[0])
-            g.NumberOfParticles = pg[1]
-            g._particle_offset = pg[2]
-
-        self.grid_particle_count[:, 0] = grid_info[:, 1]
+        self._read_particles("DM", is_checkpoint, nyx_extra_real_fields)
 
 
 class NyxDataset(BoxlibDataset):
 
     _index_class = NyxHierarchy
+    _field_info_class = NyxFieldInfo
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
@@ -1013,7 +1023,7 @@
         if os.path.isdir(os.path.join(self.output_dir, "DM")):
             # we have particles
             self.parameters["particles"] = 1 
-            self.particle_types = ("io",)
+            self.particle_types = ("DM",)
             self.particle_types_raw = self.particle_types
 
     def _set_code_unit_attributes(self):
@@ -1044,3 +1054,219 @@
     if len(vals) == 1:
         vals = vals[0]
     return vals
+
+
+class BoxLibParticleHeader(object):
+
+    def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None):
+
+        self.species_name = directory_name
+        header_filename =  ds.output_dir + "/" + directory_name + "/Header"
+        with open(header_filename, "r") as f:
+            self.version_string = f.readline().strip()
+
+            particle_real_type = self.version_string.split('_')[-1]
+            if particle_real_type == 'double':
+                self.real_type = np.float64
+            elif particle_real_type == 'single':
+                self.real_type = np.float32
+            else:
+                raise RuntimeError("yt did not recognize particle real type.")
+            self.int_type = np.int32
+
+            self.dim = int(f.readline().strip())
+            self.num_int_base = 2 + self.dim
+            self.num_real_base = self.dim
+            self.num_int_extra = 0  # this should be written by Boxlib, but isn't
+            self.num_real_extra = int(f.readline().strip())
+            self.num_int = self.num_int_base + self.num_int_extra
+            self.num_real = self.num_real_base + self.num_real_extra            
+            self.num_particles = int(f.readline().strip())
+            self.max_next_id = int(f.readline().strip())
+            self.finest_level = int(f.readline().strip())
+            self.num_levels = self.finest_level + 1
+
+            # Boxlib particles can be written in checkpoint or plotfile mode
+            # The base integer fields are only there for checkpoints, but some
+            # codes use the checkpoint format for plotting
+            if not is_checkpoint:
+                self.num_int_base = 0
+                self.num_int_extra = 0
+                self.num_int = 0
+
+            self.grids_per_level = np.zeros(self.num_levels, dtype='int64')
+            self.data_map = {}
+            for level_num in range(self.num_levels):
+                self.grids_per_level[level_num] = int(f.readline().strip())
+                self.data_map[level_num] = {}
+            
+            pfd = namedtuple("ParticleFileDescriptor",
+                             ["file_number", "num_particles", "offset"])
+
+            for level_num in range(self.num_levels):
+                for grid_num in range(self.grids_per_level[level_num]):
+                    entry = [int(val) for val in f.readline().strip().split()]
+                    self.data_map[level_num][grid_num] = pfd(*entry)
+
+        self._generate_particle_fields(extra_field_names)
+
+    def _generate_particle_fields(self, extra_field_names):
+
+        # these are the 'base' integer fields
+        self.known_int_fields = [(self.species_name, "particle_id"),
+                                 (self.species_name, "particle_cpu"),
+                                 (self.species_name, "particle_cell_x"),
+                                 (self.species_name, "particle_cell_y"),
+                                 (self.species_name, "particle_cell_z")]
+        self.known_int_fields = self.known_int_fields[0:self.num_int_base]
+
+        # these are extra integer fields
+        extra_int_fields = ["particle_int_comp%d" % i 
+                            for i in range(self.num_int_extra)]
+        self.known_int_fields.extend([(self.species_name, field) 
+                                      for field in extra_int_fields])
+
+        # these are the base real fields
+        self.known_real_fields = [(self.species_name, "particle_position_x"),
+                                  (self.species_name, "particle_position_y"),
+                                  (self.species_name, "particle_position_z")]
+        self.known_real_fields = self.known_real_fields[0:self.num_real_base]
+
+        # these are the extras
+        if extra_field_names is not None:
+            assert(len(extra_field_names) == self.num_real_extra)
+        else:
+            extra_field_names = ["particle_real_comp%d" % i 
+                                 for i in range(self.num_real_extra)]
+
+        self.known_real_fields.extend([(self.species_name, field) 
+                                       for field in extra_field_names])
+
+        self.known_fields = self.known_int_fields + self.known_real_fields
+
+        self.particle_int_dtype = np.dtype([(t[1], self.int_type) 
+                                            for t in self.known_int_fields])
+
+        self.particle_real_dtype = np.dtype([(t[1], self.real_type) 
+                                            for t in self.known_real_fields])
+
+
+class WarpXHierarchy(BoxlibHierarchy):
+
+    def __init__(self, ds, dataset_type="boxlib_native"):
+        super(WarpXHierarchy, self).__init__(ds, dataset_type)
+
+        # extra beyond the base real fields that all Boxlib
+        # particles have, i.e. the xyz positions
+        warpx_extra_real_fields = ['particle_weight',
+                                   'particle_velocity_x',
+                                   'particle_velocity_y',
+                                   'particle_velocity_z']
+
+        is_checkpoint = False
+
+        for ptype in self.ds.particle_types:
+            self._read_particles(ptype, is_checkpoint, warpx_extra_real_fields)
+        
+        # Additional WarpX particle information (used to set up species)
+        with open(self.ds.output_dir + "/WarpXHeader", 'r') as f:
+
+            # skip to the end, where species info is written out
+            line = f.readline()
+            while line and line != ')\n':
+                line = f.readline()
+            line = f.readline()
+
+            # Read in the species information
+            species_id = 0
+            while line:
+                line = line.strip().split()
+                charge = YTQuantity(float(line[0]), "C")
+                mass = YTQuantity(float(line[1]), "kg")
+                charge_name = 'particle%.1d_charge' % species_id
+                mass_name = 'particle%.1d_mass' % species_id
+                self.parameters[charge_name] = charge
+                self.parameters[mass_name] = mass
+                line = f.readline()
+                species_id += 1
+    
+def _skip_line(line):
+    if len(line) == 0:
+        return True
+    if line[0] == '\n':
+        return True
+    if line[0] == "=":
+        return True
+    if line[0] == ' ':
+        return True
+
+
+class WarpXDataset(BoxlibDataset):
+
+    _index_class = WarpXHierarchy
+    _field_info_class = WarpXFieldInfo
+
+    def __init__(self, output_dir,
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
+                 dataset_type='boxlib_native',
+                 storage_filename=None,
+                 units_override=None,
+                 unit_system="mks"):
+
+        super(WarpXDataset, self).__init__(output_dir,
+                                           cparam_filename,
+                                           fparam_filename,
+                                           dataset_type,
+                                           storage_filename,
+                                           units_override,
+                                           unit_system)
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "warpx_job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        if os.path.exists(jobinfo_filename):
+            return True
+        return False
+
+    def _parse_parameter_file(self):
+        super(WarpXDataset, self)._parse_parameter_file()
+        jobinfo_filename = os.path.join(self.output_dir, "warpx_job_info")
+        with open(jobinfo_filename, "r") as f:
+            for line in f.readlines():
+                if _skip_line(line):
+                    continue
+                l = line.strip().split(":")
+                if len(l) == 2:
+                    self.parameters[l[0].strip()] = l[1].strip()
+                l = line.strip().split("=")
+                if len(l) == 2:
+                    self.parameters[l[0].strip()] = l[1].strip()
+                
+        # set the periodicity based on the integer BC runtime parameters
+        is_periodic = self.parameters['geometry.is_periodic'].split()
+        periodicity = [bool(int(val)) for val in is_periodic]
+        self.periodicity = ensure_tuple(periodicity)
+
+        species_list = []
+        species_dirs = glob.glob(self.output_dir + "/particle*")
+        for species in species_dirs:
+            species_list.append(species[len(self.output_dir)+1:])
+        self.particle_types = tuple(species_list)
+        self.particle_types_raw = self.particle_types
+
+
+    def _set_code_unit_attributes(self):
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "m"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "kg"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "m/s"))

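The parsing in BoxLibParticleHeader reduces to a line-oriented read of the
per-species Header file; the condensed, standalone sketch below mirrors the
field order in the diff (the path and version-string comment are illustrative,
and the checkpoint-versus-plotfile int-field distinction is omitted):

    from collections import namedtuple

    def read_particle_header(path):
        pfd = namedtuple("ParticleFileDescriptor",
                         ["file_number", "num_particles", "offset"])
        with open(path) as f:
            version = f.readline().strip()       # ends in _double or _single
            dim = int(f.readline().strip())      # spatial dimensionality
            num_real_extra = int(f.readline().strip())
            num_particles = int(f.readline().strip())
            max_next_id = int(f.readline().strip())  # unused in this sketch
            finest_level = int(f.readline().strip())
            grids_per_level = [int(f.readline().strip())
                               for _ in range(finest_level + 1)]
            # one (file_number, num_particles, offset) triple per grid per level
            data_map = {lev: [pfd(*(int(v) for v in f.readline().split()))
                              for _ in range(ngrids)]
                        for lev, ngrids in enumerate(grids_per_level)}
        return version, dim, num_real_extra, num_particles, data_map
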
diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -51,6 +51,66 @@
     return tr
 
 
+class WarpXFieldInfo(FieldInfoContainer):
+
+    known_other_fields = (
+        ("Bx", ("T", ["magnetic_field_x"], None)),
+        ("By", ("T", ["magnetic_field_y"], None)),
+        ("Bz", ("T", ["magnetic_field_z"], None)),
+        ("Ex", ("V/m", ["electric_field_x"], None)),
+        ("Ey", ("V/m", ["electric_field_y"], None)),
+        ("Ez", ("V/m", ["electric_field_z"], None)),
+        ("jx", ("A", ["current_x"], None)),
+        ("jy", ("A", ["current_y"], None)),
+        ("jz", ("A", ["current_z"], None)),
+    )
+
+    known_particle_fields = (
+        ("particle_weight", ("", [], None)),
+        ("particle_position_x", ("m", [], None)),
+        ("particle_position_y", ("m", [], None)),
+        ("particle_position_z", ("m", [], None)),
+        ("particle_velocity_x", ("m/s", [], None)),
+        ("particle_velocity_y", ("m/s", [], None)),
+        ("particle_velocity_z", ("m/s", [], None)),
+    )
+
+    extra_union_fields = (
+        ("kg", "particle_mass"),
+        ("C", "particle_charge"),
+        ("", "particle_ones"),
+    )
+
+    def setup_particle_fields(self, ptype):
+
+        def get_mass(field, data):
+            species_mass = data.ds.index.parameters[ptype + '_mass']
+            return data["particle_weight"]*species_mass
+
+        self.add_field((ptype, "particle_mass"), sampling_type="particle",
+                       function=get_mass,
+                       units="kg")
+
+        def get_charge(field, data):
+            species_charge = data.ds.index.parameters[ptype + '_charge']
+            return data["particle_weight"]*species_charge
+
+        self.add_field((ptype, "particle_charge"), sampling_type="particle",
+                       function=get_charge,
+                       units="C")
+
+        super(WarpXFieldInfo, self).setup_particle_fields(ptype)
+
+
+class NyxFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+    known_particle_fields = (
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+    )
+
+
 class BoxlibFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", (rho_units, ["density"], None)),

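WarpX plotfiles store a statistical weight per macro-particle rather than a
mass, so the derived fields above recover physical quantities by multiplying
the weight by the per-species constants the hierarchy reads from WarpXHeader.
A hedged usage sketch against the PlasmaAcceleration sample named elsewhere in
this email:

    import yt

    ds = yt.load("PlasmaAcceleration/plt00030")
    ad = ds.all_data()
    # weight * species mass / charge, with units attached by the derived fields
    print(ad["particle0", "particle_mass"].sum())     # kg
    print(ad["particle0", "particle_charge"].sum())   # C
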
diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -1,5 +1,5 @@
 """
-Orion data-file handling functions
+Boxlib data-file handling functions
 
 
 
@@ -81,6 +81,85 @@
                         local_offset += size
         return data
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        chunks = list(chunks)
+        unions = self.ds.particle_unions
+
+        rv = {f: np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    if ftype in unions:
+                        for subtype in unions[ftype]:
+                            data = self._read_particles(grid, selector,
+                                                        subtype, fname)
+                            rv[ftype, fname] = np.concatenate((data,
+                                                               rv[ftype, fname]))
+                    else:
+                        data = self._read_particles(grid, selector,
+                                                    ftype, fname)
+                        rv[ftype, fname] = np.concatenate((data,
+                                                           rv[ftype, fname]))
+        return rv
+
+    def _read_particles(self, grid, selector, ftype, name):
+
+        npart = grid._pdata[ftype]["NumberOfParticles"]
+        if npart == 0:
+            return np.array([])
+
+        fn = grid._pdata[ftype]["particle_filename"]
+        offset = grid._pdata[ftype]["offset"]
+        pheader = self.ds.index.particle_headers[ftype]
+        
+        # handle the case that this is an integer field
+        int_fnames = [fname for _, fname in pheader.known_int_fields]
+        if name in int_fnames:
+            ind = int_fnames.index(name)
+            fn = grid._pdata[ftype]["particle_filename"]
+            with open(fn, "rb") as f:
+
+                # read in the position fields for selection
+                f.seek(offset + 
+                       pheader.particle_int_dtype.itemsize * npart)
+                rdata = np.fromfile(f, pheader.real_type, pheader.num_real * npart)
+                x = np.asarray(rdata[0::pheader.num_real], dtype=np.float64)
+                y = np.asarray(rdata[1::pheader.num_real], dtype=np.float64)
+                z = np.asarray(rdata[2::pheader.num_real], dtype=np.float64)
+                mask = selector.select_points(x, y, z, 0.0)
+
+                if mask is None:
+                    return np.array([])
+                
+                # read in the data we want
+                f.seek(offset)
+                idata = np.fromfile(f, pheader.int_type, pheader.num_int * npart)
+                data = np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
+                return data[mask].flatten()
+
+        # handle case that this is a real field
+        real_fnames = [fname for _, fname in pheader.known_real_fields]
+        if name in real_fnames:
+            ind = real_fnames.index(name)
+            with open(fn, "rb") as f:
+
+                # read in the position fields for selection
+                f.seek(offset + 
+                       pheader.particle_int_dtype.itemsize * npart)
+                rdata = np.fromfile(f, pheader.real_type, pheader.num_real * npart)
+                x = np.asarray(rdata[0::pheader.num_real], dtype=np.float64)
+                y = np.asarray(rdata[1::pheader.num_real], dtype=np.float64)
+                z = np.asarray(rdata[2::pheader.num_real], dtype=np.float64)
+                mask = selector.select_points(x, y, z, 0.0)
+
+                if mask is None:
+                    return np.array([])
+
+                data = np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)
+                return data[mask].flatten()
+
+
 class IOHandlerOrion(IOHandlerBoxlib):
     _dataset_type = "orion_native"
 

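The reader above implies a simple per-grid binary layout: all num_int 32-bit
integers for the grid's particles come first, then num_real reals per particle
stored particle-major, so component i of a real field lives at stride num_real
starting at index i. A standalone sketch of reading one real component under
that assumption (the function and its signature are illustrative, not part of
the commit):

    import numpy as np

    def read_real_component(fn, offset, npart, num_int, num_real, comp,
                            real_type=np.float64, int_type=np.int32):
        with open(fn, "rb") as f:
            # skip this grid's integer block, as the yt reader does
            f.seek(offset + np.dtype(int_type).itemsize * num_int * npart)
            rdata = np.fromfile(f, dtype=real_type, count=num_real * npart)
        # particle-major storage: every num_real-th value, starting at comp
        return rdata[comp::num_real]
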
diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Orion frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import \
-    assert_equal, \
-    requires_file, \
-    units_override_check
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    small_patch_amr, \
-    data_dir_load
-from yt.frontends.boxlib.api import OrionDataset
-
-# We don't do anything needing ghost zone generation right now, because these
-# are non-periodic datasets.
-_fields = ("temperature", "density", "velocity_magnitude")
-
-radadvect = "RadAdvect/plt00000"
-@requires_ds(radadvect)
-def test_radadvect():
-    ds = data_dir_load(radadvect)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _fields):
-        test_radadvect.__name__ = test.description
-        yield test
-
-rt = "RadTube/plt00500"
-@requires_ds(rt)
-def test_radtube():
-    ds = data_dir_load(rt)
-    yield assert_equal, str(ds), "plt00500"
-    for test in small_patch_amr(ds, _fields):
-        test_radtube.__name__ = test.description
-        yield test
-
-star = "StarParticles/plrd01000"
-@requires_ds(star)
-def test_star():
-    ds = data_dir_load(star)
-    yield assert_equal, str(ds), "plrd01000"
-    for test in small_patch_amr(ds, _fields):
-        test_star.__name__ = test.description
-        yield test
-
-@requires_file(rt)
-def test_OrionDataset():
-    assert isinstance(data_dir_load(rt), OrionDataset)
-
-@requires_file(rt)
-def test_units_override():
-    for test in units_override_check(rt):
-        yield test
-

diff -r c23ff278bd328ac2f2944502c46acdb072be10b5 -r 5c3af8da1dd806d6f5d76466e04feade4b1d6e5f yt/frontends/boxlib/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -0,0 +1,183 @@
+"""
+Boxlib frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.boxlib.api import \
+    OrionDataset, \
+    NyxDataset, \
+    WarpXDataset
+import numpy as np    
+
+# We don't do anything needing ghost zone generation right now, because these
+# are non-periodic datasets.
+_orion_fields = ("temperature", "density", "velocity_magnitude")
+_nyx_fields = ("Ne", "Temp", "particle_mass_density")
+_warpx_fields = ("Ex", "By", "jz")
+
+radadvect = "RadAdvect/plt00000"
+@requires_ds(radadvect)
+def test_radadvect():
+    ds = data_dir_load(radadvect)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radadvect.__name__ = test.description
+        yield test
+
+rt = "RadTube/plt00500"
+@requires_ds(rt)
+def test_radtube():
+    ds = data_dir_load(rt)
+    yield assert_equal, str(ds), "plt00500"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_radtube.__name__ = test.description
+        yield test
+
+star = "StarParticles/plrd01000"
+@requires_ds(star)
+def test_star():
+    ds = data_dir_load(star)
+    yield assert_equal, str(ds), "plrd01000"
+    for test in small_patch_amr(ds, _orion_fields):
+        test_star.__name__ = test.description
+        yield test
+
+LyA = "Nyx_LyA/plt00000"
+@requires_ds(LyA)
+def test_LyA():
+    ds = data_dir_load(LyA)
+    yield assert_equal, str(ds), "plt00000"
+    for test in small_patch_amr(ds, _nyx_fields,
+                                input_center="c",
+                                input_weight="Ne"):
+        test_LyA.__name__ = test.description
+        yield test
+
+@requires_file(LyA)
+def test_nyx_particle_io():
+    ds = data_dir_load(LyA)
+
+    grid = ds.index.grids[0]
+    npart_grid_0 = 7908  # read directly from the header
+    assert_equal(grid['particle_position_x'].size, npart_grid_0)
+    assert_equal(grid['DM', 'particle_position_y'].size, npart_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart_grid_0)
+
+    ad = ds.all_data()
+    npart = 32768  # read directly from the header
+    assert_equal(ad['particle_velocity_x'].size, npart)
+    assert_equal(ad['DM', 'particle_velocity_y'].size, npart)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart)
+
+    assert(np.all(ad['particle_mass'] == ad['particle_mass'][0]))
+
+    left_edge = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    right_edge = ds.arr([4.0, 4.0, 4.0], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+langmuir = "LangmuirWave/plt00020"
+@requires_ds(langmuir)
+def test_langmuir():
+    ds = data_dir_load(langmuir)
+    yield assert_equal, str(ds), "plt00020"
+    for test in small_patch_amr(ds, _warpx_fields, 
+                                input_center="c",
+                                input_weight="Ex"):
+        test_langmuir.__name__ = test.description
+        yield test
+
+plasma = "PlasmaAcceleration/plt00030"
+@requires_ds(plasma)
+def test_plasma():
+    ds = data_dir_load(plasma)
+    yield assert_equal, str(ds), "plt00030"
+    for test in small_patch_amr(ds, _warpx_fields,
+                                input_center="c",
+                                input_weight="Ex"):
+        test_plasma.__name__ = test.description
+        yield test
+
+@requires_file(plasma)
+def test_warpx_particle_io():
+    ds = data_dir_load(plasma)
+    grid = ds.index.grids[0]
+
+    # read directly from the header
+    npart0_grid_0 = 344  
+    npart1_grid_0 = 69632
+
+    assert_equal(grid['particle0', 'particle_position_x'].size, npart0_grid_0)
+    assert_equal(grid['particle1', 'particle_position_y'].size, npart1_grid_0)
+    assert_equal(grid['all', 'particle_position_z'].size, npart0_grid_0 + npart1_grid_0)
+
+    # read directly from the header
+    npart0 = 1360  
+    npart1 = 802816  
+    ad = ds.all_data()
+    assert_equal(ad['particle0', 'particle_velocity_x'].size, npart0)
+    assert_equal(ad['particle1', 'particle_velocity_y'].size, npart1)
+    assert_equal(ad['all', 'particle_velocity_z'].size, npart0 + npart1)
+
+    assert(np.all(ad['particle1', 'particle_mass'] == ad['particle1', 'particle_mass'][0]))
+    assert(np.all(ad['particle0', 'particle_mass'] == ad['particle0', 'particle_mass'][0]))
+
+    left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], 'code_length')
+    right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], 'code_length')
+    center = 0.5*(left_edge + right_edge)
+                   
+    reg = ds.region(center, left_edge, right_edge)
+
+    assert(np.all(np.logical_and(reg['particle_position_x'] <= right_edge[0], 
+                                 reg['particle_position_x'] >= left_edge[0])))
+
+    assert(np.all(np.logical_and(reg['particle_position_y'] <= right_edge[1], 
+                                 reg['particle_position_y'] >= left_edge[1])))
+
+    assert(np.all(np.logical_and(reg['particle_position_z'] <= right_edge[2], 
+                                 reg['particle_position_z'] >= left_edge[2])))
+
+@requires_file(rt)
+def test_OrionDataset():
+    assert isinstance(data_dir_load(rt), OrionDataset)
+
+@requires_file(LyA)
+def test_NyxDataset():
+    assert isinstance(data_dir_load(LyA), NyxDataset)
+
+@requires_file(plasma)
+def test_WarpXDataset():
+    assert isinstance(data_dir_load(plasma), WarpXDataset)
+
+@requires_file(rt)
+def test_units_override():
+    for test in units_override_check(rt):
+        yield test

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


