[yt-svn] commit/yt: 7 new changesets

commits-noreply@bitbucket.org
Mon Jul 17 14:36:20 PDT 2017


7 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/6b16ea9f2306/
Changeset:   6b16ea9f2306
User:        Corentin Cadiou
Date:        2017-07-05 08:26:19+00:00
Summary:     possible to force detection of cosmo simulation
Affected #:  1 file

diff -r 2447f34f2682526765e4b9dd5dd247f062cd3d5a -r 6b16ea9f23065ade24f00dae27ea02bcc185b181 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -157,15 +157,15 @@
         self.local_particle_count = hvals['npart']
 
         particle_fields = [
-            ("particle_position_x", "d"),
-            ("particle_position_y", "d"),
-            ("particle_position_z", "d"),
-            ("particle_velocity_x", "d"),
-            ("particle_velocity_y", "d"),
-            ("particle_velocity_z", "d"),
-            ("particle_mass", "d"),
-            ("particle_identifier", "i"),
-            ("particle_refinement_level", "I")]
+                ("particle_position_x", "d"),
+                ("particle_position_y", "d"),
+                ("particle_position_z", "d"),
+                ("particle_velocity_x", "d"),
+                ("particle_velocity_y", "d"),
+                ("particle_velocity_z", "d"),
+                ("particle_mass", "d"),
+                ("particle_identifier", "i"),
+                ("particle_refinement_level", "I")]
         if hvals["nstar_tot"] > 0:
             particle_fields += [("particle_age", "d"),
                                 ("particle_metallicity", "d")]
@@ -209,7 +209,7 @@
 
     def _read_amr(self):
         """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain
+           For each oct, only the position, index, level and domain 
            are needed - its position in the octree is found automatically.
            The most important is finding all the information to feed
            oct_handler.add
@@ -235,7 +235,7 @@
         min_level = self.ds.min_level
         # yt max level is not the same as the RAMSES one.
         # yt max level is the maximum number of additional refinement levels
-        # so for a uni grid run with no refinement, it would be 0.
+        # so for a uni grid run with no refinement, it would be 0. 
         # So we initially assume that.
         max_level = 0
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
@@ -372,7 +372,7 @@
             dsl.update(set(domain.particle_field_offsets.keys()))
         self.particle_field_list = list(dsl)
         self.field_list = [("ramses", f) for f in self.fluid_field_list] \
-                          + self.particle_field_list
+                        + self.particle_field_list
 
     def _setup_auto_fields(self):
         '''
@@ -380,7 +380,7 @@
         '''
         # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
         # Find nvar
-
+        
 
         # TODO: copy/pasted from DomainFile; needs refactoring!
         num = os.path.basename(self.dataset.parameter_filename).split("."
@@ -414,25 +414,25 @@
             raise ValueError
         # Basic hydro runs
         if nvar == 5:
-            fields = ["Density",
-                      "x-velocity", "y-velocity", "z-velocity",
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
                       "Pressure"]
         if nvar > 5 and nvar < 11:
-            fields = ["Density",
-                      "x-velocity", "y-velocity", "z-velocity",
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
                       "Pressure", "Metallicity"]
         # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
         if nvar == 11:
-            fields = ["Density",
-                      "x-velocity", "y-velocity", "z-velocity",
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
                       "Pressure"]
         if nvar > 11:
-            fields = ["Density",
-                      "x-velocity", "y-velocity", "z-velocity",
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
                       "Pressure","Metallicity"]
         while len(fields) < nvar:
             fields.append("var"+str(len(fields)))
@@ -490,9 +490,9 @@
         return {'io': npart}
 
     def print_stats(self):
-
+        
         # This function prints information based on the fluid on the grids,
-        # and therefore does not work for DM only runs.
+        # and therefore does not work for DM only runs. 
         if not self.fluid_field_list:
             print("This function is not implemented for DM only runs")
             return
@@ -532,11 +532,11 @@
     _index_class = RAMSESIndex
     _field_info_class = RAMSESFieldInfo
     gamma = 1.4 # This will get replaced on hydro_fn open
-
+    
     def __init__(self, filename, dataset_type='ramses',
-                 fields = None, storage_filename = None,
+                 fields=None, storage_filename=None,
                  units_override=None, unit_system="cgs",
-                 extra_particle_fields=None):
+                 extra_particle_fields=None, cosmological=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]
@@ -544,14 +544,18 @@
         fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
                 If set to None, will try a default set of fields
         extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file.
+        cosmological: If set to None, automatically detect cosmological simulation. If a boolean, force 
+                      its value.
         '''
         self.fluid_types += ("ramses",)
         self._fields_in_file = fields
         self._extra_particle_fields = extra_particle_fields
+        self.force_cosmological = cosmological
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
         self.storage_filename = storage_filename
 
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
 
@@ -566,7 +570,7 @@
         time_unit = self.parameters['unit_t']
 
         # calculating derived units (except velocity and temperature, done below)
-        mass_unit = density_unit * length_unit**3
+        mass_unit = density_unit * length_unit**3     
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))
         pressure_unit = density_unit * (length_unit / time_unit)**2
@@ -637,7 +641,13 @@
         # This is likely not true, but it's not clear how to determine the boundary conditions
         self.periodicity = (True, True, True)
         # These conditions seem to always be true for non-cosmological datasets
-        if rheader["time"] >= 0 and rheader["H0"] == 1 and rheader["aexp"] == 1:
+        if self.force_cosmological is not None:
+            is_cosmological = self.force_cosmological
+        else:
+            is_cosmological = (rheader["time"] >= 0 and
+                               rheader["H0"] == 1 and
+                               rheader["aexp"] == 1)
+        if not is_cosmological:
             self.cosmological_simulation = 0
             self.current_redshift = 0
             self.hubble_constant = 0
@@ -665,7 +675,7 @@
 
             self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
                              self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
-
+ 
             self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
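
For reference, a minimal usage sketch of the new ``cosmological`` keyword
(the dataset path and the check mirror the test added in the next
changeset; they are not part of this commit itself):

.. code-block:: python

   import yt

   # Force yt to treat the output as non-cosmological, bypassing the
   # header-based heuristic; cosmological=None (the default) keeps
   # the automatic detection.
   ds = yt.load('DICEGalaxyDisk_nonCosmological/output_00002/info_00002.txt',
                cosmological=False)
   assert ds.cosmological_simulation == 0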
 
 


https://bitbucket.org/yt_analysis/yt/commits/3a6543cd4a4a/
Changeset:   3a6543cd4a4a
User:        Corentin Cadiou
Date:        2017-07-05 08:26:29+00:00
Summary:     test cosmo detection
Affected #:  1 file

diff -r 6b16ea9f23065ade24f00dae27ea02bcc185b181 -r 3a6543cd4a4ac9f9806b14fa0a128b75279f8ceb yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -62,14 +62,54 @@
 
 ramsesNonCosmo = 'DICEGalaxyDisk_nonCosmological/output_00002'
 @requires_file(ramsesNonCosmo)
+def test_non_cosmo_detection():
+    path = os.path.join(ramsesNonCosmo, 'info_00002.txt')
+    ds = yt.load(path, cosmological=False)
+    assert_equal(ds.cosmological_simulation, 0)
+
+    ds = yt.load(path, cosmological=None)
+    assert_equal(ds.cosmological_simulation, 0)
+
+    ds = yt.load(path)
+    assert_equal(ds.cosmological_simulation, 0)
+
+
+@requires_file(ramsesNonCosmo)
 def test_unit_non_cosmo():
-    ds = yt.load(os.path.join(ramsesNonCosmo, 'info_00002.txt'))
+    for force_cosmo in [False, None]:
+        ds = yt.load(os.path.join(ramsesNonCosmo, 'info_00002.txt'), cosmological=force_cosmo)
+
+        expected_raw_time = 0.0299468077820411 # in ramses unit
+        assert_equal(ds.current_time.value, expected_raw_time)
+
+        expected_time = 14087886140997.336 # in seconds
+        assert_equal(ds.current_time.in_units('s').value, expected_time)
+
 
-    expected_raw_time = 0.0299468077820411 # in ramses unit
-    assert_equal(ds.current_time.value, expected_raw_time)
+ramsesCosmo = 'output_00080/info_00080.txt'
+@requires_file(ramsesCosmo)
+def test_cosmo_detection():
+    ds = yt.load(ramsesCosmo, cosmological=True)
+    assert_equal(ds.cosmological_simulation, 1)
+
+    ds = yt.load(ramsesCosmo, cosmological=None)
+    assert_equal(ds.cosmological_simulation, 1)
+
+    ds = yt.load(ramsesCosmo)
+    assert_equal(ds.cosmological_simulation, 1)
 
-    expected_time = 14087886140997.336 # in seconds
-    assert_equal(ds.current_time.in_units('s').value, expected_time)
+
+@requires_file(ramsesCosmo)
+def test_unit_cosmo():
+    for force_cosmo in [True, None]:
+        ds = yt.load(ramsesCosmo, cosmological=force_cosmo)
+
+        expected_raw_time = 1.119216564055017 # in ramses unit
+        assert_equal(ds.current_time.value, expected_raw_time)
+
+        expected_time = 3.756241729312462e+17 # in seconds
+        assert_equal(ds.current_time.in_units('s').value, expected_time)
+
 
 ramsesExtraFieldsSmall = 'ramses_extra_fields_small/output_00001'
 @requires_file(ramsesExtraFieldsSmall)


https://bitbucket.org/yt_analysis/yt/commits/121cee1aa9d7/
Changeset:   121cee1aa9d7
User:        Corentin Cadiou
Date:        2017-07-05 08:52:56+00:00
Summary:     add doc
Affected #:  1 file

diff -r 3a6543cd4a4ac9f9806b14fa0a128b75279f8ceb -r 121cee1aa9d7d9fb6b09a53e2d3b2a9f18ee94db doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1922,6 +1922,11 @@
    # ('all', 'family') and ('all', 'info') now in ds.field_list
 
 
+It is possible to force yt to treat the simulation as a cosmological
+simulation by providing the ``cosmological=True`` parameter (or
+``False`` to force non-cosmology). If left to ``None``, the kind of
+simulation is inferred from the data.
+
 .. _loading-sph-data:
 
 SPH Particle Data


https://bitbucket.org/yt_analysis/yt/commits/10431e247718/
Changeset:   10431e247718
User:        Corentin Cadiou
Date:        2017-07-06 15:02:28+00:00
Summary:     fix logical mistake
Affected #:  1 file

diff -r 121cee1aa9d7d9fb6b09a53e2d3b2a9f18ee94db -r 10431e2477186d5573dc26b89124dfdb6346b8fe yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -644,9 +644,9 @@
         if self.force_cosmological is not None:
             is_cosmological = self.force_cosmological
         else:
-            is_cosmological = (rheader["time"] >= 0 and
-                               rheader["H0"] == 1 and
-                               rheader["aexp"] == 1)
+            is_cosmological = not (rheader["time"] >= 0 and
+                                   rheader["H0"] == 1 and
+                                   rheader["aexp"] == 1)
         if not is_cosmological:
             self.cosmological_simulation = 0
             self.current_redshift = 0
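
Pulled out as a standalone sketch, the corrected heuristic reads as
follows (the header values in the checks below are hypothetical, chosen
only to illustrate the two branches): a non-cosmological RAMSES header
has ``time >= 0``, ``H0 == 1`` and ``aexp == 1``, so the conjunction
identifies non-cosmological runs and must be negated:

.. code-block:: python

   def is_cosmological(rheader):
       # time >= 0, H0 == 1 and aexp == 1 together flag a
       # non-cosmological run, hence the negation.
       return not (rheader["time"] >= 0 and
                   rheader["H0"] == 1 and
                   rheader["aexp"] == 1)

   # Hypothetical header values, for illustration only:
   assert not is_cosmological({"time": 0.03, "H0": 1, "aexp": 1})
   assert is_cosmological({"time": -1.2, "H0": 70.0, "aexp": 0.25})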


https://bitbucket.org/yt_analysis/yt/commits/02e1ca816bd4/
Changeset:   02e1ca816bd4
User:        Corentin Cadiou
Date:        2017-07-06 15:07:42+00:00
Summary:     move comment
Affected #:  1 file

diff -r 10431e2477186d5573dc26b89124dfdb6346b8fe -r 02e1ca816bd4ef886612fd3f926561e916c43bfa yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -640,13 +640,15 @@
         self.domain_right_edge = np.ones(3, dtype='float64')
         # This is likely not true, but it's not clear how to determine the boundary conditions
         self.periodicity = (True, True, True)
-        # These conditions seem to always be true for non-cosmological datasets
+
         if self.force_cosmological is not None:
             is_cosmological = self.force_cosmological
         else:
+            # These conditions seem to always be true for non-cosmological datasets
             is_cosmological = not (rheader["time"] >= 0 and
                                    rheader["H0"] == 1 and
                                    rheader["aexp"] == 1)
+
         if not is_cosmological:
             self.cosmological_simulation = 0
             self.current_redshift = 0


https://bitbucket.org/yt_analysis/yt/commits/e9c2bc5762e6/
Changeset:   e9c2bc5762e6
User:        ngoldbaum
Date:        2017-07-16 14:34:36+00:00
Summary:     Merge branch 'master' into feature/force-cosmo
Affected #:  42 files

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 README.md
--- a/README.md
+++ b/README.md
@@ -97,6 +97,32 @@
 
 We have developed some [guidelines](CONTRIBUTING.rst) for contributing to yt.
 
+**Imposter syndrome disclaimer**: We want your help. No, really.
+
+There may be a little voice inside your head that is telling you that you're not
+ready to be an open source contributor; that your skills aren't nearly good
+enough to contribute. What could you possibly offer a project like this one?
+
+We assure you - the little voice in your head is wrong. If you can write code at
+all, you can contribute code to open source. Contributing to open source
+projects is a fantastic way to advance one's coding skills. Writing perfect code
+isn't the measure of a good developer (that would disqualify all of us!); it's
+trying to create something, making mistakes, and learning from those
+mistakes. That's how we all improve, and we are happy to help others learn.
+
+Being an open source contributor doesn't just mean writing code, either. You can
+help out by writing documentation, tests, or even giving feedback about the
+project (and yes - that includes giving feedback about the contribution
+process). Some of these contributions may be the most valuable to the project as
+a whole, because you're coming to the project with fresh eyes, so you can see
+the errors and assumptions that seasoned contributors have glossed over.
+
+(This disclaimer was originally written by
+[Adrienne Lowe](https://github.com/adriennefriend) for a
+[PyCon talk](https://www.youtube.com/watch?v=6Uj746j9Heo), and was adapted by yt
+based on its use in the README file for the
+[MetPy project](https://github.com/Unidata/MetPy))
+
 ## Resources
 
 We have some community and documentation resources available.

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 doc/source/cookbook/simple_1d_line_plot.py
--- /dev/null
+++ b/doc/source/cookbook/simple_1d_line_plot.py
@@ -0,0 +1,14 @@
+import yt
+
+# Load the dataset
+ds = yt.load("SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e", step=-1)
+
+# Create a line plot of the variables 'u' and 'v' with 1000 sampling points evenly spaced
+# between the coordinates (0, 0, 0) and (0, 1, 0)
+plot = yt.LinePlot(ds, [('all', 'v'), ('all', 'u')], (0, 0, 0), (0, 1, 0), 1000)
+
+# Add a legend
+plot.add_legend(('all', 'v'))
+
+# Save the line plot
+plot.save()

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 doc/source/cookbook/simple_plots.rst
--- a/doc/source/cookbook/simple_plots.rst
+++ b/doc/source/cookbook/simple_plots.rst
@@ -43,6 +43,14 @@
 
 .. yt_cookbook:: simple_phase.py
 
+Simple 1D Line Plotting
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This script shows how to make a ``LinePlot`` through a dataset.
+See :ref:`how-to-1d-line-plot` for more information.
+
+.. yt_cookbook:: simple_1d_line_plot.py
+
 Simple Probability Distribution Functions
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1023,7 +1023,7 @@
 
 Gadget data in raw binary format can also be loaded with the ``load`` command.
 This is supported for snapshots created with the ``SnapFormat`` parameter
-set to 1 (the standard for Gadget-2) or 2.
+set to 1 or 2.
 
 .. code-block:: python
 
@@ -1036,24 +1036,46 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
 There are two additional pieces of information that may be needed.  If your
-simulation is cosmological, yt can often guess the bounding box and the units
-of the simulation.  However, for isolated simulations and for cosmological
-simulations with non-standard units, these must be supplied.  For example, if
-a length unit of 1.0 corresponds to a kiloparsec, you can supply this in the
-constructor.  yt can accept units such as ``Mpc``, ``kpc``, ``cm``, ``Mpccm/h``
-and so on.  In particular, note that ``Mpc/h`` and ``Mpccm/h`` (``cm`` for
-comoving here) are usable unit definitions.
+simulation is cosmological, yt can often guess the bounding box and the units of
+the simulation.  However, for isolated simulations and for cosmological
+simulations with non-standard units, these must be supplied by the user.  For
+example, if a length unit of 1.0 corresponds to a kiloparsec, you can supply
+this in the constructor.  yt can accept units such as ``Mpc``, ``kpc``, ``cm``,
+``Mpccm/h`` and so on.  In particular, note that ``Mpc/h`` and ``Mpccm/h``
+(``cm`` for comoving here) are usable unit definitions.
 
 yt will attempt to use units for ``mass``, ``length`` and ``time`` as supplied
 in the argument ``unit_base``.  The ``bounding_box`` argument is a list of
 two-item tuples or lists that describe the left and right extents of the
-particles.
+particles. In this example we load a dataset with a custom bounding box
+and units.
 
 .. code-block:: python
 
-   ds = GadgetDataset("snap_004",
-           unit_base = {'length': ('kpc', 1.0)},
-           bounding_box = [[-600.0, 600.0], [-600.0, 600.0], [-600.0, 600.0]])
+
+   bbox = [[-600.0, 600.0], [-600.0, 600.0], [-600.0, 600.0]]
+   unit_base = {
+       'length': (1.0, 'kpc'),
+       'velocity': (1.0, 'km/s'),
+       'mass': (1.0, 'Msun')
+   }
+
+   ds = yt.load("snap_004", unit_base=unit_base, bounding_box=bbox)
+
+In addition, you can use ``UnitLength_in_cm``, ``UnitVelocity_in_cm_per_s``,
+and ``UnitMass_in_g`` as keys for the ``unit_base`` dictionary. These names
+come from the names used in the Gadget runtime parameter file. This example
+will initialize a dataset with the same units as the example above:
+
+.. code-block:: python
+
+  unit_base = {
+      'UnitLength_in_cm': 3.09e21,
+      'UnitVelocity_in_cm_per_s': 1e5,
+      'UnitMass_in_g': 1.989e33
+  }
+
+  ds = yt.load("snap_004", unit_base=unit_base, bounding_box=bbox)
 
 .. _particle-indexing-criteria:
 
@@ -1921,6 +1943,9 @@
    ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
    # ('all', 'family') and ('all', 'info') now in ds.field_list
 
+yt supports outputs made by the mainline ``RAMSES`` code as well as the
+``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
+based on the presence of an ``info_rt_*.txt`` file in the output directory.
 
 It is possible to force yt to treat the simulation as a cosmological
 simulation by providing the ``cosmological=True`` parameter (or
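
A hedged loading sketch for an RT output (the path matches the RT test
added elsewhere in this merge; the field names are among those the RT
code path registers):

.. code-block:: python

   import yt

   # yt recognizes the output as RAMSES-RT from the info_rt_*.txt file
   # sitting next to the usual info_*.txt in the output directory.
   ds = yt.load('ramses_rt_00088/output_00088/info_00088.txt')
   ad = ds.all_data()
   ad['ramses', 'HII']           # RT ionization-fraction field
   ad['gas', 'H_p1_fraction']    # its aliased gas-space name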

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -162,10 +162,11 @@
 these steps:
 
 * Identify what it is that went wrong, and how you knew it went wrong.
-* Put your script, errors, and outputs online:
+* Put your script, errors, inputs and outputs online:
 
   * ``$ yt pastebin script.py`` - pastes script.py online
   * ``$ yt upload_image image.png`` - pastes image online
+  * ``$ yt upload my_input.tar`` - pastes my_input.tar online
 
 * Identify which version of the code you’re using.
 

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -18,6 +18,7 @@
    ~yt.visualization.plot_window.OffAxisProjectionPlot
    ~yt.visualization.plot_window.WindowPlotMPL
    ~yt.visualization.plot_window.PlotWindow
+   ~yt.visualization.plot_window.plot_2d
 
 ProfilePlot and PhasePlot
 ^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -176,6 +176,20 @@
 
    yt pastebin_grab 1768
 
+upload
+++++++
+
+Upload a file to a public curldrop instance. Curldrop is a simple web
+application that allows you to upload and download files straight from your
+Terminal with an HTTP client such as curl. It was initially developed by
+`Kevin Kennell <https://github.com/kennell/curldrop>`_ and later forked and
+adjusted for yt’s needs. After a successful upload you will receive a url that
+can be used to share the data with other people.
+
+.. code-block:: bash
+
+   yt upload my_file.tar.gz
+
 plot
 ++++
 

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -14,8 +14,8 @@
 
 The :class:`~yt.visualization.plot_window.PlotWindow` interface is useful for
 taking a quick look at simulation outputs.  Simple mechanisms exist for making
-plots of slices, projections, 1D profiles, and 2D profiles (phase plots), all of
-which are described below.
+plots of slices, projections, 1D spatial line plots, 1D profiles, and 2D
+profiles (phase plots), all of which are described below.
 
 .. _viewing-plots:
 
@@ -30,7 +30,7 @@
 in other environments as well:
 
 .. code-block:: python
- 
+
    %matplotlib notebook
    import yt
    yt.toggle_interactivity()
@@ -203,6 +203,30 @@
 See :class:`~yt.visualization.plot_window.AxisAlignedSlicePlot` for the
 full class description.
 
+.. _plot-2d:
+
+Plots of 2D Datasets
+~~~~~~~~~~~~~~~~~~~~
+
+If you have a two-dimensional cartesian, cylindrical, or polar dataset, 
+:func:`~yt.visualization.plot_window.plot_2d` is a way to make a plot
+within the dataset's plane without having to specify the axis, which
+in this case is redundant. Otherwise, ``plot_2d`` accepts the same
+arguments as ``SlicePlot``. The one other difference is that the
+``center`` keyword argument can be a two-dimensional coordinate instead
+of a three-dimensional one:
+
+.. python-script::
+
+    import yt
+    ds = yt.load("WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030")
+    p = yt.plot_2d(ds, "density", center=[1.0, 0.4])
+    p.set_log("density", False)
+    p.save()
+
+See :func:`~yt.visualization.plot_window.plot_2d` for the full description
+of the function and its keywords.
+
 .. _off-axis-slices:
 
 Off Axis Slices
@@ -943,6 +967,7 @@
    # Save the image.
    plot.save()
 
+
 Customizing axis limits
 ~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -1054,6 +1079,87 @@
     # change only the first line
     plot.set_line_property("linestyle", "--", 0)
 
+.. _how-to-1d-unstructured-mesh:
+
+1D Line Sampling
+----------------
+
+yt has the ability to sample datasets along arbitrary lines
+and plot the result. You must supply five arguments to the ``LinePlot``
+class. They are enumerated below:
+
+1. Dataset
+2. A list of fields or a single field you wish to plot
+3. The starting point of the sampling line. This should be an n-element list, tuple,
+   ndarray, or YTArray with the elements corresponding to the coordinates of the
+   starting point. (n should equal the dimension of the dataset)
+4. The ending point of the sampling line. This should also be an n-element list, tuple,
+   ndarray, or YTArray with the elements corresponding to the coordinates of the
+   ending point.
+5. The number of sampling points along the line, e.g. if 1000 is specified, then
+   data will be sampled at 1000 points evenly spaced between the starting and
+   ending points.
+
+The below code snippet illustrates how this is done:
+
+.. code-block:: python
+
+   ds = yt.load("SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e", step=-1)
+   plot = yt.LinePlot(ds, [('all', 'v'), ('all', 'u')], (0, 0, 0), (0, 1, 0), 1000)
+   plot.save()
+
+If working in a Jupyter Notebook, ``LinePlot`` also has the ``show()`` method.
+
+You can add a legend to a 1D sampling plot. Adding one takes two steps:
+
+1. When instantiating the ``LinePlot``, pass a dictionary of
+   labels with keys corresponding to the field names
+2. Call the ``LinePlot`` ``add_legend`` method
+
+X- and Y- axis units can be set with ``set_x_unit`` and ``set_unit`` methods
+respectively. The below code snippet combines all the features we've discussed:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+   plot = yt.LinePlot(ds, 'density', [0, 0, 0], [1, 1, 1], 512)
+   plot.add_legend('density')
+   plot.set_x_unit('cm')
+   plot.set_unit('density', 'kg/cm**3')
+   plot.save()
+
+If a list of fields is passed to ``LinePlot``, yt will create a number of
+individual figures equal to the number of different dimensional
+quantities. E.g. if ``LinePlot`` receives two fields with units of "length/time"
+and a field with units of "temperature", two different figures will be created,
+one with plots of the "length/time" fields and another with the plot of the
+"temperature" field. It is only necessary to call ``add_legend``
+for one field of a multi-field plot to produce a legend containing all the
+labels passed in the initial construction of the ``LinePlot`` instance. Example:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load("SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e", step=-1)
+   plot = yt.LinePlot(ds, [('all', 'v'), ('all', 'u')], [0, 0, 0], [0, 1, 0],
+                      100, labels={('all', 'u') : r"v$_x$", ('all', 'v') : r"v$_y$"})
+   plot.add_legend(('all', 'u'))
+   plot.save()
+
+``LinePlot`` is a bit different from yt ray objects which are data
+containers. ``LinePlot`` is a plotting class that may use yt ray objects to
+supply field plotting information. However, perhaps the most important
+difference to highlight between rays and ``LinePlot`` is that rays return data
+elements that intersect with the ray and make no guarantee about the spacing
+between data elements. ``LinePlot`` sampling points are guaranteed to be evenly
+spaced. In the case of cell data where multiple points fall within the same
+cell, the ``LinePlot`` object will show the same field value for each sampling
+point that falls within the same cell.
+
 .. _how-to-make-2d-profiles:
 
 2D Phase Plots

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -20,7 +20,7 @@
   local_fits_001:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_007:
+  local_flash_008:
     - yt/frontends/flash/tests/test_outputs.py
 
   local_gadget_001:
@@ -45,7 +45,7 @@
   local_owls_001:
     - yt/frontends/owls/tests/test_outputs.py
 
-  local_pw_016:
+  local_pw_017:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -57,21 +57,22 @@
   local_tipsy_002:
     - yt/frontends/tipsy/tests/test_outputs.py
 
-  local_varia_008:
+  local_varia_009:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/visualization/volume_rendering/tests/test_vr_orientation.py
     - yt/fields/tests/test_xray_fields.py
 
-  local_photon_001:
+  local_photon_002:
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
     - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
 
-  local_unstructured_006:
+  local_unstructured_008:
     - yt/visualization/volume_rendering/tests/test_mesh_render.py
     - yt/visualization/tests/test_mesh_slices.py:test_tri2
     - yt/visualization/tests/test_mesh_slices.py:test_quad2
     - yt/visualization/tests/test_mesh_slices.py:test_multi_region
+    - yt/visualization/tests/test_line_plots.py:test_line_plot
 
   local_boxlib_004:
     - yt/frontends/boxlib/tests/test_outputs.py:test_radadvect
@@ -83,12 +84,13 @@
     - yt/frontends/boxlib/tests/test_outputs.py:test_units_override
     - yt/frontends/boxlib/tests/test_outputs.py:test_raw_fields
 
-  local_boxlib_particles_003:
+  local_boxlib_particles_004:
     - yt/frontends/boxlib/tests/test_outputs.py:test_LyA
     - yt/frontends/boxlib/tests/test_outputs.py:test_nyx_particle_io
     - yt/frontends/boxlib/tests/test_outputs.py:test_castro_particle_io
     - yt/frontends/boxlib/tests/test_outputs.py:test_langmuir
     - yt/frontends/boxlib/tests/test_outputs.py:test_plasma
+    - yt/frontends/boxlib/tests/test_outputs.py:test_beam
     - yt/frontends/boxlib/tests/test_outputs.py:test_warpx_particle_io
     - yt/frontends/boxlib/tests/test_outputs.py:test_NyxDataset
     - yt/frontends/boxlib/tests/test_outputs.py:test_WarpXDataset

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -103,13 +103,13 @@
     FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     write_bitmap, write_image, \
     apply_colormap, scale_image, write_projection, \
-    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
+    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, LinePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps, add_cmap, make_colormap, \
     ProfilePlot, PhasePlot, ParticlePhasePlot, \
     ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot, \
     FITSImageData, FITSSlice, FITSProjection, FITSOffAxisSlice, \
-    FITSOffAxisProjection
+    FITSOffAxisProjection, plot_2d
 
 from yt.visualization.volume_rendering.api import \
     volume_render, create_scene, ColorTransferFunction, TransferFunction, \

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -63,6 +63,7 @@
     imagebin_api_key = 'e1977d9195fe39e',
     imagebin_upload_url = 'https://api.imgur.com/3/upload',
     imagebin_delete_url = 'https://api.imgur.com/3/image/{delete_hash}',
+    curldrop_upload_url = 'http://use.yt/upload',
     thread_field_detection = 'False',
     ignore_invalid_unit_operation_errors = 'False',
     chunk_size = '1000',

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -342,6 +342,10 @@
                     self._initialize_projected_units(fields, chunk)
                     _units_initialized = True
                 self._handle_chunk(chunk, fields, tree)
+        # if there's less than nprocs chunks, units won't be initialized
+        # on all processors, so sync with _projected_units on rank 0
+        projected_units = self.comm.mpi_bcast(self._projected_units)
+        self._projected_units = projected_units
         # Note that this will briefly double RAM usage
         if self.method == "mip":
             merge_style = -1

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -13,6 +13,7 @@
 
 import contextlib
 import inspect
+import re
 import warnings
 
 from yt.extern.six import string_types, PY2
@@ -144,6 +145,8 @@
                 self.units = units
         elif isinstance(units, Unit):
             self.units = str(units)
+        elif isinstance(units, bytes):
+            self.units = units.decode("utf-8")
         else:
             raise FieldUnitsError("Cannot handle units '%s' (type %s)." \
                                   "Please provide a string or Unit " \
@@ -292,6 +295,51 @@
         s += ")"
         return s
 
+    def _is_ion(self):
+        p = re.compile("_p[0-9]+_")
+        result = False
+        if p.search(self.name[1]) is not None:
+            result = True
+        return result
+
+    def _ion_to_label(self):
+        pnum2rom = {
+            "0":"I", "1":"II", "2":"III", "3":"IV", "4":"V",
+            "5":"VI", "6":"VII", "7":"VIII", "8":"IX", "9":"X",
+            "10":"XI", "11":"XII", "12":"XIII", "13":"XIV", "14":"XV",
+            "15":"XVI", "16":"XVII", "17":"XVIII", "18":"XIX", "19":"XX"}
+
+        p = re.compile("_p[0-9]+_")
+        m = p.search(self.name[1])
+        if m is not None:
+            pstr = m.string[m.start()+1:m.end()-1]
+            segments = self.name[1].split("_")
+            for i,s in enumerate(segments):
+                segments[i] = s.capitalize()
+                if s == pstr:
+                    ipstr = i
+            element = segments[ipstr-1]
+            roman = pnum2rom[pstr[1:]]
+            label = element + '\ ' + roman + '\ ' + \
+                '\ '.join(segments[ipstr+1:])
+        else:
+            label = self.name[1]
+        return label
+
+    def get_latex_display_name(self):
+        label = self.display_name
+        if label is None:
+            if self._is_ion():
+                fname = self._ion_to_label()
+                label = r'$\rm{'+fname.replace('_','\ ')+r'}$'
+            else:
+                label = r'$\rm{'+self.name[1].replace('_','\ ').title()+r'}$'
+        elif label.find('$') == -1:
+            label = label.replace(' ','\ ')
+            label = r'$\rm{'+label+r'}$'
+        return label
+
+
 class FieldValidator(object):
     pass
 
@@ -361,6 +409,7 @@
         FieldValidator.__init__(self)
         self.ghost_zones = ghost_zones
         self.fields = fields
+
     def __call__(self, data):
         # When we say spatial information, we really mean
         # that it has a three-dimensional data structure

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/fields/xray_emission_fields.py
--- a/yt/fields/xray_emission_fields.py
+++ b/yt/fields/xray_emission_fields.py
@@ -20,7 +20,11 @@
 
 from yt.config import ytcfg
 from yt.fields.derived_field import DerivedField
-from yt.funcs import mylog, only_on_root, issue_deprecation_warning
+from yt.funcs import \
+    mylog, \
+    only_on_root, \
+    issue_deprecation_warning, \
+    parse_h5_attr
 from yt.utilities.exceptions import YTFieldNotFound
 from yt.utilities.exceptions import YTException
 from yt.utilities.linear_interpolators import \
@@ -95,12 +99,12 @@
         only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
         in_file = h5py.File(filename, "r")
         if "info" in in_file.attrs:
-            only_on_root(mylog.info, in_file.attrs["info"].decode('utf8'))
-        if in_file.attrs["version"] != data_version[table_type]:
+            only_on_root(mylog.info, parse_h5_attr(in_file, "info"))
+        if parse_h5_attr(in_file, "version") != data_version[table_type]:
             raise ObsoleteDataException(table_type)
         else:
             only_on_root(mylog.info, "X-ray '%s' emissivity data version: %s." % \
-                         (table_type, in_file.attrs["version"]))
+                         (table_type, parse_h5_attr(in_file, "version")))
 
         self.log_T = in_file["log_T"][:]
         self.emissivity_primordial = in_file["emissivity_primordial"][:]

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -30,7 +30,6 @@
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
-from yt.units import YTQuantity
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
@@ -1382,6 +1381,48 @@
     return boxes, file_names, offsets
 
 
+class WarpXHeader(object):
+    def __init__(self, header_fn):
+        self.data = {}
+        with open(header_fn, "r") as f:
+            self.data["Checkpoint_version"] = int(f.readline().strip().split()[-1])
+            
+            self.data["num_levels"] = int(f.readline().strip().split()[-1])
+            self.data["istep"]      = [int(num) for num in f.readline().strip().split()]
+            self.data["nsubsteps"]  = [int(num) for num in f.readline().strip().split()]
+            
+            self.data["t_new"] = [float(num) for num in f.readline().strip().split()]
+            self.data["t_old"] = [float(num) for num in f.readline().strip().split()]
+            self.data["dt"]    = [float(num) for num in f.readline().strip().split()]
+            
+            self.data["moving_window_x"] = float(f.readline().strip().split()[-1])
+
+            #  not all datasets will have is_synchronized
+            line = f.readline().strip().split()
+            if (len(line) == 1):                
+                self.data["is_synchronized"] = bool(line[-1])
+                self.data["prob_lo"] = [float(num) for num in f.readline().strip().split()]
+            else:
+                self.data["is_synchronized"] = True                
+                self.data["prob_lo"] = [float(num) for num in line]
+                            
+            self.data["prob_hi"] = [float(num) for num in f.readline().strip().split()]
+            
+            for _ in range(self.data["num_levels"]):
+                num_boxes = int(f.readline().strip().split()[0][1:])
+                for __ in range(num_boxes):
+                    f.readline()
+                f.readline()
+                
+            i = 0
+            line = f.readline()
+            while line:
+                line = line.strip().split()
+                self.data["species_%d" % i] = [float(val) for val in line]
+                i = i + 1
+                line = f.readline()
+
+
 class WarpXHierarchy(BoxlibHierarchy):
 
     def __init__(self, ds, dataset_type="boxlib_native"):
@@ -1392,27 +1433,17 @@
             self._read_particles(ptype, is_checkpoint)
         
         # Additional WarpX particle information (used to set up species)
-        with open(self.ds.output_dir + "/WarpXHeader", 'r') as f:
-
-            # skip to the end, where species info is written out
-            line = f.readline()
-            while line and line != ')\n':
-                line = f.readline()
-            line = f.readline()
-
-            # Read in the species information
-            species_id = 0
-            while line:
-                line = line.strip().split()
-                charge = YTQuantity(float(line[0]), "C")
-                mass = YTQuantity(float(line[1]), "kg")
-                charge_name = 'particle%.1d_charge' % species_id
-                mass_name = 'particle%.1d_mass' % species_id
-                self.parameters[charge_name] = charge
-                self.parameters[mass_name] = mass
-                line = f.readline()
-                species_id += 1
-    
+        self.warpx_header = WarpXHeader(self.ds.output_dir + "/WarpXHeader")
+        
+        i = 0
+        for key, val in self.warpx_header.data.items():
+            if key.startswith("species_"):
+                charge_name = 'particle%.1d_charge' % i
+                mass_name = 'particle%.1d_mass' % i
+                self.parameters[charge_name] = val[0]
+                self.parameters[mass_name] = val[1]
+                i = i + 1
+                
     def _detect_output_fields(self):
         super(WarpXHierarchy, self)._detect_output_fields()
 

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -18,6 +18,7 @@
     boltzmann_constant_cgs, amu_cgs
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from yt.units import YTQuantity
 
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
@@ -93,7 +94,7 @@
 
         def get_mass(field, data):
             species_mass = data.ds.index.parameters[ptype + '_mass']
-            return data["particle_weight"]*species_mass
+            return data["particle_weight"]*YTQuantity(species_mass, 'kg')
 
         self.add_field((ptype, "particle_mass"), sampling_type="particle",
                        function=get_mass,
@@ -101,7 +102,7 @@
 
         def get_charge(field, data):
             species_charge = data.ds.index.parameters[ptype + '_charge']
-            return data["particle_weight"]*species_charge
+            return data["particle_weight"]*YTQuantity(species_charge, 'C')
 
         self.add_field((ptype, "particle_charge"), sampling_type="particle",
                        function=get_charge,

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/boxlib/tests/test_outputs.py
--- a/yt/frontends/boxlib/tests/test_outputs.py
+++ b/yt/frontends/boxlib/tests/test_outputs.py
@@ -167,6 +167,17 @@
         test_plasma.__name__ = test.description
         yield test
 
+beam = "GaussianBeam/plt03008"
+@requires_ds(beam)
+def test_beam():
+    ds = data_dir_load(beam)
+    assert_equal(str(ds), "plt03008")
+    for test in small_patch_amr(ds, _warpx_fields,
+                                input_center="c",
+                                input_weight="Ex"):
+        test_beam.__name__ = test.description
+        yield test
+
 @requires_file(plasma)
 def test_warpx_particle_io():
     ds = data_dir_load(plasma)

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/exodus_ii/fields.py
--- a/yt/frontends/exodus_ii/fields.py
+++ b/yt/frontends/exodus_ii/fields.py
@@ -33,6 +33,8 @@
 
     def __init__(self, ds, field_list):
         super(ExodusIIFieldInfo, self).__init__(ds, field_list)
+        for name in self:
+            self[name].take_log = False
         # If you want, you can check self.field_list
 
     def setup_fluid_fields(self):

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -153,12 +153,6 @@
             gre[:,2] = 2.0 * np.pi
             return
 
-        # Now, for cartesian data.
-        for i in range(self.num_grids):
-            dx = dxs[self.grid_levels[i],:]
-            gle[i][:ND] = np.rint(gle[i][:ND]/dx[0][:ND])*dx[0][:ND]
-            gre[i][:ND] = np.rint(gre[i][:ND]/dx[0][:ND])*dx[0][:ND]
-
     def _populate_grid_objects(self):
         ii = np.argsort(self.grid_levels.flat)
         gid = self._handle["/gid"][:]

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/halo_catalog/io.py
--- a/yt/frontends/halo_catalog/io.py
+++ b/yt/frontends/halo_catalog/io.py
@@ -18,7 +18,9 @@
 import numpy as np
 
 from yt.utilities.exceptions import YTDomainOverflow
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    parse_h5_attr
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -45,7 +47,7 @@
         pn = "particle_position_%s"
         for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
-                units = f[pn % "x"].attrs["units"]
+                units = parse_h5_attr(f[pn % "x"], "units")
                 x, y, z = \
                   (self.ds.arr(f[pn % ax].value.astype("float64"), units)
                    for ax in "xyz")
@@ -65,7 +67,7 @@
         for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
-                    units = f[pn % "x"].attrs["units"]
+                    units = parse_h5_attr(f[pn % "x"], "units")
                     x, y, z = \
                       (self.ds.arr(f[pn % ax].value.astype("float64"), units)
                        for ax in "xyz")
@@ -86,7 +88,7 @@
         with h5py.File(data_file.filename, "r") as f:
             if not f.keys(): return None
             pos = np.empty((pcount, 3), dtype="float64")
-            units = f["particle_position_x"].attrs["units"]
+            units = parse_h5_attr(f["particle_position_x"], "units")
             dx = np.finfo(f['particle_position_x'].dtype).eps
             dx = 2.0 * self.ds.quan(dx, units).to("code_length")
             pos[:,0] = f["particle_position_x"].value
@@ -115,6 +117,7 @@
     def _identify_fields(self, data_file):
         with h5py.File(data_file.filename, "r") as f:
             fields = [("halos", field) for field in f]
-            units = dict([(("halos", field), 
-                           f[field].attrs["units"]) for field in f])
+            units = dict([(("halos", field),
+                           parse_h5_attr(f[field], "units"))
+                          for field in f])
         return fields, units

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -15,6 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import glob
 import os
 import numpy as np
 import stat
@@ -378,10 +379,6 @@
         '''
         If no fluid fields are set, the code tries to set up a fluids array by hand
         '''
-        # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
-        # Find nvar
-        
-
         # TODO: copy/pasted from DomainFile; needs refactoring!
         num = os.path.basename(self.dataset.parameter_filename).split("."
                 )[0].split("_")[1]
@@ -408,32 +405,43 @@
         self.ds.gamma = hvals['gamma']
         nvar = hvals['nvar']
         # OK, we got NVAR, now set up the arrays depending on what NVAR is
+        # but first check for radiative transfer!    
+        foldername  = os.path.abspath(os.path.dirname(self.ds.parameter_filename))
+        rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
+        if rt_flag: # rt run
+            if nvar < 10:
+                mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
+                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
+            else:
+                mylog.info('Detected RAMSES-RT file WITH IR trapping.')
+                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR", "Pressure", "Metallicity", "HII", "HeII", "HeIII"]     
+        else:            
+            if nvar < 5:
+                mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
+                raise ValueError
+            # Basic hydro runs
+            if nvar == 5:
+                fields = ["Density",
+                          "x-velocity", "y-velocity", "z-velocity", 
+                          "Pressure"]
+            if nvar > 5 and nvar < 11:
+                fields = ["Density", 
+                          "x-velocity", "y-velocity", "z-velocity", 
+                          "Pressure", "Metallicity"]
+            # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+            if nvar == 11:
+                fields = ["Density", 
+                          "x-velocity", "y-velocity", "z-velocity", 
+                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+                          "Pressure"]
+            if nvar > 11:
+                fields = ["Density", 
+                          "x-velocity", "y-velocity", "z-velocity", 
+                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+                          "Pressure","Metallicity"]
         # Allow some wiggle room for users to add too many variables
-        if nvar < 5:
-            mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
-            raise ValueError
-        # Basic hydro runs
-        if nvar == 5:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "Pressure"]
-        if nvar > 5 and nvar < 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "Pressure", "Metallicity"]
-        # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
-        if nvar == 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
-                      "Pressure"]
-        if nvar > 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
-                      "Pressure","Metallicity"]
         while len(fields) < nvar:
             fields.append("var"+str(len(fields)))
         mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -13,6 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import glob
 import os
 import numpy as np
 
@@ -67,8 +68,12 @@
         ("x-velocity", (vel_units, ["velocity_x"], None)),
         ("y-velocity", (vel_units, ["velocity_y"], None)),
         ("z-velocity", (vel_units, ["velocity_z"], None)),
+        ("Pres_IR", (pressure_units, ["pres_IR"], None)),
         ("Pressure", (pressure_units, ["pressure"], None)),
         ("Metallicity", ("", ["metallicity"], None)),
+        ("HII",  ("", ["H_p1_fraction"], None)),
+        ("HeII", ("", ["He_p1_fraction"], None)),
+        ("HeIII",("", ["He_p2_fraction"], None)),
     )
     known_particle_fields = (
         ("particle_position_x", ("code_length", [], None)),
@@ -92,6 +97,33 @@
         self.add_field(("gas", "temperature"), sampling_type="cell",  function=_temperature,
                         units=self.ds.unit_system["temperature"])
         self.create_cooling_fields()
+        # See if we need to load the rt fields
+        foldername  = os.path.abspath(os.path.dirname(self.ds.parameter_filename))
+        rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
+        if rt_flag: # rt run
+            self.setup_rt_fields()
+
+    def setup_rt_fields(self):
+        def _temp_IR(field, data):
+            rv = data["gas", "pres_IR"]/data["gas", "density"]
+            rv *= mass_hydrogen_cgs/boltzmann_constant_cgs
+            return rv
+        self.add_field(("gas", "temp_IR"), sampling_type="cell",
+                       function=_temp_IR,
+                       units=self.ds.unit_system["temperature"])
+        for species in ['H_p1', 'He_p1', 'He_p2']:
+            def _species_density(field, data):
+                return data['gas', species+'_fraction']*data['gas', 'density']
+            self.add_field(('gas', species+'_density'), sampling_type='cell',
+                           function=_species_density,
+                           units=self.ds.unit_system['density'])
+            def _species_mass(field, data):
+                return (data['gas', species+'_density']*
+                        data['index', 'cell_volume'])
+            self.add_field(('gas', species+'_mass'), sampling_type='cell',
+                           function=_species_mass,
+                           units=self.ds.unit_system['mass'])
+
 
     def create_cooling_fields(self):
         num = os.path.basename(self.ds.parameter_filename).split("."

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -126,3 +126,31 @@
     dd = ds.all_data()
     families = dd[('all', 'family')]
     assert all(families == 100)
+
+ramses_rt = "ramses_rt_00088/output_00088/info_00088.txt"
+@requires_file(ramses_rt)
+def test_ramses_rt():
+    ds = yt.load(ramses_rt)
+    ad = ds.all_data()
+
+    expected_fields = ["Density", "x-velocity", "y-velocity", "z-velocity",
+                       "Pres_IR", "Pressure", "Metallicity", "HII", "HeII",
+                       "HeIII"]
+
+    for field in expected_fields:
+        assert(('ramses', field) in ds.field_list)
+
+        # test that field access works
+        ad['ramses', field]
+
+    # test that special derived fields for RT datasets work
+    special_fields = [('gas', 'temp_IR')]
+    species = ['H_p1', 'He_p1', 'He_p2']
+    for specie in species:
+        special_fields.extend(
+            [('gas', specie+'_fraction'), ('gas', specie+'_density'),
+             ('gas', specie+'_mass')])
+
+    for field in special_fields:
+        assert(field in ds.derived_field_list)
+        ad[field]

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -735,7 +735,13 @@
     handler.domain_left_edge = domain_left_edge
     handler.domain_right_edge = domain_right_edge
     handler.refine_by = 2
-    handler.dimensionality = 3
+    if np.all(domain_dimensions[1:] == 1):
+        dimensionality = 1
+    elif domain_dimensions[2] == 1:
+        dimensionality = 2
+    else:
+        dimensionality = 3
+    handler.dimensionality = dimensionality
     handler.domain_dimensions = domain_dimensions
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
@@ -925,7 +931,13 @@
     handler.domain_left_edge = domain_left_edge
     handler.domain_right_edge = domain_right_edge
     handler.refine_by = refine_by
-    handler.dimensionality = 3
+    if np.all(domain_dimensions[1:] == 1):
+        dimensionality = 1
+    elif domain_dimensions[2] == 1:
+        dimensionality = 2
+    else:
+        dimensionality = 3
+    handler.dimensionality = dimensionality
     handler.domain_dimensions = domain_dimensions
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0

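A short sketch of what this change does, assuming an in-memory uniform
grid (the expected value follows from the branch above rather than from a
verified run):

    import numpy as np
    import yt

    # one cell thick along z, so the stream frontend should now report
    # dimensionality == 2 instead of the previously hard-coded 3
    data = {"density": np.random.random((32, 32, 1))}
    ds = yt.load_uniform_grid(data, (32, 32, 1))
    print(ds.dimensionality)  # expected: 2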
diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -22,12 +22,48 @@
     cartesian_to_cylindrical, \
     cylindrical_to_cartesian
 from yt.funcs import mylog
+from yt.units.yt_array import uvstack, YTArray
 from yt.utilities.lib.pixelization_routines import \
     pixelize_element_mesh, pixelize_off_axis_cartesian, \
-    pixelize_cartesian, pixelize_cartesian_nodal
+    pixelize_cartesian, pixelize_cartesian_nodal, \
+    pixelize_element_mesh_line
 from yt.data_objects.unstructured_mesh import SemiStructuredMesh
 from yt.utilities.nodal_data_utils import get_nodal_data
 
+def _sample_ray(ray, npoints, field):
+    """
+    Private function that samples field values along a ray object; the
+    sampled values become the y-axis values in a LinePlot.
+
+    Parameters
+    ----------
+    ray : YTOrthoRay, YTRay, or LightRay
+        Ray object from which to sample field values
+    npoints : int
+        The number of points to sample
+    field : str or field tuple
+        The name of the field to sample
+    """
+    start_point = ray.start_point
+    end_point = ray.end_point
+    sample_dr = (end_point - start_point)/(npoints-1)
+    sample_points = [np.arange(npoints)*sample_dr[i] for i in range(3)]
+    sample_points = uvstack(sample_points).T + start_point
+    ray_coordinates = uvstack([ray[d] for d in 'xyz']).T
+    ray_dds = uvstack([ray['d'+d] for d in 'xyz']).T
+    ray_field = ray[field]
+    field_values = ray.ds.arr(np.zeros(npoints), ray_field.units)
+    for i, sample_point in enumerate(sample_points):
+        ray_contains = ((sample_point >= (ray_coordinates - ray_dds/2)) &
+                        (sample_point <= (ray_coordinates + ray_dds/2)))
+        ray_contains = ray_contains.all(axis=-1)
+        # use argmax to find the first nonzero index, sometimes there
+        # are two indices if the sampling point happens to fall exactly at
+        # a cell boundary
+        field_values[i] = ray_field[np.argmax(ray_contains)]
+    dr = np.sqrt((sample_dr**2).sum())
+    # sample point i sits at arc length i*dr along the line, so the last
+    # point lands at dr*(npoints-1) == |end_point - start_point|
+    x = np.arange(npoints)*dr
+    return x, field_values
 
 class CartesianCoordinateHandler(CoordinateHandler):
     name = "cartesian"
@@ -65,6 +101,11 @@
 
     def pixelize(self, dimension, data_source, field, bounds, size,
                  antialias = True, periodic = True):
+        """
+        Method for pixelizing datasets in preparation for
+        two-dimensional image plots. Relies on several sampling
+        routines written in Cython.
+        """
         index = data_source.ds.index
         if (hasattr(index, 'meshes') and
            not isinstance(index.meshes[0], SemiStructuredMesh)):
@@ -119,6 +160,57 @@
             return self._oblique_pixelize(data_source, field, bounds, size,
                                           antialias)
 
+
+    def pixelize_line(self, field, start_point, end_point, npoints):
+        """
+        Method for sampling datasets along a line in preparation for
+        one-dimensional line plots. For UnstructuredMesh, relies on a
+        sampling routine written in Cython.
+        """
+        if npoints < 2:
+            raise ValueError("Must have at least two sample points in order "
+                             "to draw a line plot.")
+        index = self.ds.index
+        if (hasattr(index, 'meshes') and
+           not isinstance(index.meshes[0], SemiStructuredMesh)):
+            ftype, fname = field
+            if ftype == "all":
+                mesh_id = 0
+                indices = np.concatenate([mesh.connectivity_indices for mesh in index.mesh_union])
+            else:
+                mesh_id = int(ftype[-1]) - 1
+                indices = index.meshes[mesh_id].connectivity_indices
+
+            coords = index.meshes[mesh_id].connectivity_coords
+            # a chained != check would miss a mismatch between coords and
+            # start_point alone, so compare both endpoints explicitly
+            if not (coords.shape[1] == end_point.size == start_point.size):
+                raise ValueError("The coordinate dimension doesn't match the "
+                                 "start and end point dimensions.")
+
+            offset = index.meshes[mesh_id]._index_offset
+            ad = self.ds.all_data()
+            field_data = ad[field]
+            if field_data.shape[1] == 27:
+                # hexahedral
+                mylog.warning("High order elements not yet supported, " +
+                              "dropping to 1st order.")
+                field_data = field_data[:, 0:8]
+                indices = indices[:, 0:8]
+
+            arc_length, plot_values = pixelize_element_mesh_line(coords, indices,
+                                                                 start_point,
+                                                                 end_point,
+                                                                 npoints, field_data,
+                                                                 index_offset=offset)
+            arc_length = YTArray(arc_length, start_point.units)
+            plot_values = YTArray(plot_values, field_data.units)
+        else:
+            ray = self.ds.ray(start_point, end_point)
+            arc_length, plot_values = _sample_ray(ray, npoints, field)
+        return arc_length, plot_values
+
     def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
                         dim, periodic):
         # We should be using fcoords
@@ -129,13 +221,13 @@
             period = period.in_units("code_length").d
 
         buff = np.zeros((size[1], size[0]), dtype="f8")
-        
+
         finfo = self.ds._get_field_info(field)
         nodal_flag = finfo.nodal_flag
         if np.any(nodal_flag):
             nodal_data = get_nodal_data(data_source, field)
             coord = data_source.coord.d
-            pixelize_cartesian_nodal(buff, 
+            pixelize_cartesian_nodal(buff,
                                      data_source['px'], data_source['py'], data_source['pz'],
                                      data_source['pdx'], data_source['pdy'], data_source['pdz'],
                                      nodal_data, coord, bounds, int(antialias),
@@ -147,7 +239,7 @@
                                bounds, int(antialias),
                                period, int(periodic))
         return buff
-            
+
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
         indices = np.argsort(data_source['pdx'])[::-1].astype(np.int_)
         buff = np.zeros((size[1], size[0]), dtype="f8")

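A hedged sketch of calling the new entry point directly on a grid dataset,
in which case it falls through to the _sample_ray helper above (the
dataset path is a stand-in for any grid dataset):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    start = ds.arr([0.25, 0.25, 0.25], "code_length")
    end = ds.arr([0.75, 0.75, 0.75], "code_length")
    arc_length, values = ds.coordinates.pixelize_line(
        ("gas", "density"), start, end, 512)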
diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/geometry/coordinates/coordinate_handler.py
--- a/yt/geometry/coordinates/coordinate_handler.py
+++ b/yt/geometry/coordinates/coordinate_handler.py
@@ -72,7 +72,7 @@
 
 class CoordinateHandler(object):
     name = None
-    
+
     def __init__(self, ds, ordering):
         self.ds = weakref.proxy(ds)
         self.axis_order = ordering
@@ -86,6 +86,9 @@
         # pixelizer
         raise NotImplementedError
 
+    def pixelize_line(self, field, start_point, end_point, npoints):
+        raise NotImplementedError
+
     def distance(self, start, end):
         p1 = self.convert_to_cartesian(start)
         p2 = self.convert_to_cartesian(end)
@@ -265,4 +268,3 @@
     c2[...,1] = np.sin(coord[...,0]) * coord[...,1] + center[1]
     c2[...,2] = coord[...,2]
     return c2
-

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1426,7 +1426,8 @@
         cdef np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask
         cdef int i
         cdef int total = 0
-        cdef IntegrationAccumulator ia
+        cdef IntegrationAccumulator *ia
+        ia = <IntegrationAccumulator *> malloc(sizeof(IntegrationAccumulator))
         cdef VolumeContainer vc
         mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
         t = np.zeros(gobj.ActiveDimensions, dtype="float64")
@@ -1445,13 +1446,14 @@
             vc.dds[i] = gobj.dds[i]
             vc.idds[i] = 1.0/gobj.dds[i]
             vc.dims[i] = dt.shape[i]
-        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
+        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> ia)
         for i in range(dt.shape[0]):
             for j in range(dt.shape[1]):
                 for k in range(dt.shape[2]):
                     if dt[i, j, k] >= 0:
                         mask[i, j, k] = 1
                         total += 1
+        free(ia)
         if total == 0: return None
         return mask.astype("bool")
 
@@ -1463,7 +1465,8 @@
         cdef np.ndarray[np.float64_t, ndim=1] tr, dtr
         cdef np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask
         cdef int i, j, k, ni
-        cdef IntegrationAccumulator ia
+        cdef IntegrationAccumulator *ia
+        ia = <IntegrationAccumulator *> malloc(sizeof(IntegrationAccumulator))
         cdef VolumeContainer vc
         t = np.zeros(gobj.ActiveDimensions, dtype="float64")
         dt = np.zeros(gobj.ActiveDimensions, dtype="float64") - 1
@@ -1481,7 +1484,7 @@
             vc.dds[i] = gobj.dds[i]
             vc.idds[i] = 1.0/gobj.dds[i]
             vc.dims[i] = dt.shape[i]
-        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
+        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> ia)
         tr = np.zeros(ia.hits, dtype="float64")
         dtr = np.zeros(ia.hits, dtype="float64")
         ni = 0
@@ -1494,6 +1497,7 @@
                         ni += 1
         if not (ni == ia.hits):
             print ni, ia.hits
+        free(ia)
         return dtr, tr
 
     @cython.boundscheck(False)
@@ -1507,7 +1511,8 @@
         cdef np.float64_t LE[3]
         cdef np.float64_t RE[3]
         cdef np.float64_t pos
-        cdef IntegrationAccumulator ia
+        cdef IntegrationAccumulator *ia
+        ia = <IntegrationAccumulator *> malloc(sizeof(IntegrationAccumulator))
         cdef np.ndarray[np.float64_t, ndim=2] coords
         cdef np.ndarray[np.int64_t, ndim=2] indices
         indices = mesh.connectivity_indices
@@ -1543,11 +1548,12 @@
                 vc.idds[j] = 1.0/vc.dds[j]
                 vc.dims[j] = 1
             t[0,0,0] = dt[0,0,0] = -1
-            walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
+            walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> ia)
             if dt[0,0,0] >= 0:
                 tr[ni] = t[0,0,0]
                 dtr[ni] = dt[0,0,0]
                 ni += 1
+        free(ia)
         return dtr, tr
 
     cdef int select_point(self, np.float64_t pos[3]) nogil:
@@ -1563,26 +1569,30 @@
     @cython.cdivision(True)
     cdef int select_bbox(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil:
-        cdef int i
-        cdef np.uint8_t cm = 1
+        cdef int i, rv
         cdef VolumeContainer vc
-        cdef IntegrationAccumulator ia
-        cdef np.float64_t dt, t
+        cdef IntegrationAccumulator *ia
+        ia = <IntegrationAccumulator *> malloc(sizeof(IntegrationAccumulator))
+        cdef np.float64_t dt[1], t[1]
+        cdef np.uint8_t cm[1]
         for i in range(3):
             vc.left_edge[i] = left_edge[i]
             vc.right_edge[i] = right_edge[i]
             vc.dds[i] = right_edge[i] - left_edge[i]
             vc.idds[i] = 1.0/vc.dds[i]
             vc.dims[i] = 1
-        t = dt = 0.0
-        ia.t = &t
-        ia.dt = &dt
-        ia.child_mask = &cm
+        t[0] = dt[0] = 0.0
+        cm[0] = 1
+        ia.t = t
+        ia.dt = dt
+        ia.child_mask = cm
         ia.hits = 0
-        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
+        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> ia)
+        rv = 0
         if ia.hits > 0:
-            return 1
-        return 0
+            rv = 1
+        free(ia)
+        return rv
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -43,6 +43,7 @@
 from yt.extern.six import add_metaclass, string_types
 from yt.extern.six.moves import urllib, input
 from yt.extern.six.moves.urllib.parse import urlparse
+from yt.extern.tqdm import tqdm
 from yt.convenience import load
 from yt.visualization.plot_window import \
     SlicePlot, \
@@ -50,7 +51,7 @@
 from yt.utilities.metadata import get_metadata
 from yt.utilities.configure import set_config
 from yt.utilities.exceptions import \
-    YTOutputNotIdentified, YTFieldNotParseable
+    YTOutputNotIdentified, YTFieldNotParseable, YTCommandRequiresModule
 
 # loading field plugins for backward compatibility, since this module
 # used to do "from yt.mods import *"
@@ -135,10 +136,7 @@
     try:
         import girder_client
     except ImportError:
-        print("this command requires girder_client to be installed")
-        print("Please install them using your python package manager, e.g.:")
-        print("   pip install girder_client --user")
-        sys.exit()
+        raise YTCommandRequiresModule('girder_client')
     if not ytcfg.get("yt", "hub_api_key"):
         print("Before you can access the yt Hub you need an API key")
         print("In order to obtain one, either register by typing:")
@@ -152,6 +150,25 @@
     return gc
 
 
+class FileStreamer:
+    final_size = None
+    next_sent = 0
+    chunksize = 100*1024
+
+    def __init__(self, f, final_size=None):
+        if final_size is None:
+            # measure the bytes remaining from the current position
+            location = f.tell()
+            f.seek(0, os.SEEK_END)
+            final_size = f.tell() - location
+            f.seek(location)
+        self.final_size = final_size
+        self.f = f
+
+    def __iter__(self):
+        with tqdm(total=self.final_size, desc='Uploading file',
+                  unit='B', unit_scale=True) as pbar:
+            while self.next_sent < self.final_size:
+                chunk = self.f.read(self.chunksize)
+                self.next_sent += len(chunk)
+                yield chunk
+                # update by the actual chunk size so the bar ends exactly
+                # at final_size even when the last read is short
+                pbar.update(len(chunk))
+
 _subparsers = {None: subparsers}
 _subparsers_description = {
     'config': 'Get and set configuration values for yt',
@@ -572,10 +589,7 @@
         try:
             import requests
         except ImportError:
-            print("yt {} requires requests to be installed".format(self.name))
-            print("Please install them using your python package manager, e.g.:")
-            print("   pip install requests --user")
-            sys.exit()
+            raise YTCommandRequiresModule('requests')
         if ytcfg.get("yt", "hub_api_key") != "":
             print("You seem to already have an API key for the hub in")
             print("{} . Delete this if you want to force a".format(CURRENT_CONFIG_FILE))
@@ -1159,6 +1173,29 @@
             pprint.pprint(rv)
 
 
+class YTUploadFileCmd(YTCommand):
+    args = (dict(short="file", type=str),)
+    description = \
+        """
+        Upload a file to yt's curldrop.
+
+        """
+    name = "upload"
+
+    def __call__(self, args):
+        try:
+            import requests
+        except ImportError:
+            raise YTCommandRequiresModule('requests')
+
+        fs = iter(FileStreamer(open(args.file, 'rb')))
+        upload_url = ytcfg.get("yt", "curldrop_upload_url")
+        r = requests.put(upload_url + "/" + os.path.basename(args.file),
+                         data=fs)
+        print()
+        print(r.text)
+
+
 class YTConfigGetCmd(YTCommand):
     subparser = 'config'
     name = 'get'

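With this changeset the new subcommand can be invoked from the shell as,
for example, "yt upload my_data.tar.gz"; FileStreamer then reports
progress through tqdm while requests streams the file to the configured
curldrop upload URL.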
diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -699,3 +699,16 @@
             if shape != self.grid_dims:
                 msg += "    Field {} has shape {}.\n".format(name, shape)
         return msg
+
+class YTCommandRequiresModule(YTException):
+    def __init__(self, module):
+        self.module = module
+
+    def __str__(self):
+        msg = "This command requires \"%s\" to be installed.\n\n" % self.module
+        msg += "Please install \"%s\" with the package manager " % self.module
+        msg += "appropriate for your python environment, e.g.:\n"
+        msg += "  conda install %s\n" % self.module
+        msg += "or:\n"
+        msg += "  pip install %s\n" % self.module
+        return msg

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/utilities/lib/fixed_interpolator.pxd
--- a/yt/utilities/lib/fixed_interpolator.pxd
+++ b/yt/utilities/lib/fixed_interpolator.pxd
@@ -30,4 +30,3 @@
                        np.float64_t vl[3], np.float64_t dds[3],
                        np.float64_t x, np.float64_t y, np.float64_t z,
                        int vind1, int vind2) nogil
-

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -119,7 +119,7 @@
     # (lr) and then iterate up to "right column" (rc) and "uppeR row" (rr),
     # depositing into them the data value.  Overlap computes the relative
     # overlap of a data value with a pixel.
-    # 
+    #
     # NOTE ON ROWS AND COLUMNS:
     #
     #   The way that images are plotting in matplotlib is somewhat different
@@ -254,7 +254,7 @@
     cdef int lc, lr, rc, rr
     cdef np.float64_t lypx, rypx, lxpx, rxpx, overlap1, overlap2
     # These are the temp vars we get from the arrays
-    cdef np.float64_t oxsp, oysp, ozsp 
+    cdef np.float64_t oxsp, oysp, ozsp
     cdef np.float64_t xsp, ysp, zsp
     cdef np.float64_t dxsp, dysp, dzsp
     # Some periodicity helpers
@@ -303,7 +303,7 @@
     # (lr) and then iterate up to "right column" (rc) and "uppeR row" (rr),
     # depositing into them the data value.  Overlap computes the relative
     # overlap of a data value with a pixel.
-    # 
+    #
     # NOTE ON ROWS AND COLUMNS:
     #
     #   The way that images are plotting in matplotlib is somewhat different
@@ -497,7 +497,7 @@
     cdef np.float64_t r_i, theta_i, dr_i, dtheta_i, dthetamin
     cdef np.float64_t costheta, sintheta
     cdef int i, pi, pj
-    
+
     cdef int imax = np.asarray(radius).argmax()
     rmax = radius[imax] + dradius[imax]
 
@@ -862,3 +862,107 @@
     free(vertices)
     free(field_vals)
     return img
+
+def pixelize_element_mesh_line(np.ndarray[np.float64_t, ndim=2] coords,
+                               np.ndarray[np.int64_t, ndim=2] conn,
+                               np.ndarray[np.float64_t, ndim=1] start_point,
+                               np.ndarray[np.float64_t, ndim=1] end_point,
+                               npoints,
+                               np.ndarray[np.float64_t, ndim=2] field,
+                               int index_offset = 0):
+
+    # This routine chooses the correct element sampler to interpolate field
+    # values at evenly spaced points along a sampling line
+    cdef np.float64_t *vertices
+    cdef np.float64_t *field_vals
+    cdef int nvertices = conn.shape[1]
+    cdef int ndim = coords.shape[1]
+    cdef int num_field_vals = field.shape[1]
+    cdef int num_plot_nodes = npoints
+    cdef int num_intervals = npoints - 1
+    cdef double[4] mapped_coord
+    cdef ElementSampler sampler
+    cdef np.ndarray[np.float64_t, ndim=1] lin_vec
+    cdef np.ndarray[np.float64_t, ndim=1] lin_inc
+    cdef np.ndarray[np.float64_t, ndim=2] lin_sample_points
+    cdef np.int64_t i, n, j, k
+    cdef np.ndarray[np.float64_t, ndim=1] arc_length
+    cdef np.float64_t lin_length, inc_length
+    cdef np.ndarray[np.float64_t, ndim=1] plot_values
+    cdef np.float64_t sample_point[3]
+
+    lin_vec = np.zeros(ndim, dtype="float64")
+    lin_inc = np.zeros(ndim, dtype="float64")
+
+    lin_sample_points = np.zeros((num_plot_nodes, ndim), dtype="float64")
+    arc_length = np.zeros(num_plot_nodes, dtype="float64")
+    plot_values = np.zeros(num_plot_nodes, dtype="float64")
+
+    # Pick the right sampler and allocate storage for the mapped coordinate
+    if ndim == 3 and nvertices == 4:
+        sampler = P1Sampler3D()
+    elif ndim == 3 and nvertices == 6:
+        sampler = W1Sampler3D()
+    elif ndim == 3 and nvertices == 8:
+        sampler = Q1Sampler3D()
+    elif ndim == 3 and nvertices == 20:
+        sampler = S2Sampler3D()
+    elif ndim == 2 and nvertices == 3:
+        sampler = P1Sampler2D()
+    elif ndim == 1 and nvertices == 2:
+        sampler = P1Sampler1D()
+    elif ndim == 2 and nvertices == 4:
+        sampler = Q1Sampler2D()
+    elif ndim == 2 and nvertices == 9:
+        sampler = Q2Sampler2D()
+    elif ndim == 2 and nvertices == 6:
+        sampler = T2Sampler2D()
+    elif ndim == 3 and nvertices == 10:
+        sampler = Tet2Sampler3D()
+    else:
+        raise YTElementTypeNotRecognized(ndim, nvertices)
+
+    # allocate temporary storage
+    vertices = <np.float64_t *> malloc(ndim * sizeof(np.float64_t) * nvertices)
+    field_vals = <np.float64_t *> malloc(sizeof(np.float64_t) * num_field_vals)
+
+    lin_vec = end_point - start_point
+    lin_length = np.linalg.norm(lin_vec)
+    lin_inc = lin_vec / num_intervals
+    inc_length = lin_length / num_intervals
+    for j in range(ndim):
+        lin_sample_points[0, j] = start_point[j]
+    arc_length[0] = 0
+    for i in range(1, num_intervals + 1):
+        for j in range(ndim):
+            lin_sample_points[i, j] = lin_sample_points[i-1, j] + lin_inc[j]
+        arc_length[i] = arc_length[i-1] + inc_length
+
+    for i in range(num_intervals + 1):
+        for j in range(3):
+            if j < ndim:
+                sample_point[j] = lin_sample_points[i][j]
+            else:
+                sample_point[j] = 0
+        for ci in range(conn.shape[0]):
+            for n in range(num_field_vals):
+                field_vals[n] = field[ci, n]
+
+            # Fill the vertices
+            for n in range(nvertices):
+                cj = conn[ci, n] - index_offset
+                for k in range(ndim):
+                    vertices[ndim*n + k] = coords[cj, k]
+
+            sampler.map_real_to_unit(mapped_coord, vertices, sample_point)
+            if not sampler.check_inside(mapped_coord) and ci != conn.shape[0] - 1:
+                continue
+            elif not sampler.check_inside(mapped_coord):
+                raise ValueError("Check to see that both starting and ending line points "
+                                 "are within the domain of the mesh.")
+            plot_values[i] = sampler.sample_at_unit_point(mapped_coord, field_vals)
+            break
+
+    free(vertices)
+    free(field_vals)
+    return arc_length, plot_values

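As a toy illustration of the new routine's interface, a single linear
tetrahedron with one field value per vertex (all arrays below are invented
for the example):

    import numpy as np
    from yt.utilities.lib.pixelization_routines import \
        pixelize_element_mesh_line

    coords = np.array([[0., 0., 0.], [1., 0., 0.],
                       [0., 1., 0.], [0., 0., 1.]])
    conn = np.array([[0, 1, 2, 3]], dtype="int64")
    field = np.array([[1.0, 2.0, 3.0, 4.0]])
    start = np.array([0.05, 0.05, 0.05])
    end = np.array([0.25, 0.25, 0.25])
    # 8 sample points between start and end; ndim == 3 with 4 vertices
    # selects the P1Sampler3D branch above
    arc_length, values = pixelize_element_mesh_line(
        coords, conn, start, end, 8, field)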
diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -51,7 +51,11 @@
     AxisAlignedSlicePlot, \
     OffAxisSlicePlot, \
     ProjectionPlot, \
-    OffAxisProjectionPlot
+    OffAxisProjectionPlot, \
+    plot_2d
+
+from .line_plot import \
+    LinePlot
 
 from .profile_plotter import \
     ProfilePlot, \

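LinePlot itself lives in the new line_plot module (truncated from this
diff); a hedged usage sketch against one of yt's unstructured-mesh sample
datasets, assuming the class is also re-exported at the top level, might
look like:

    import yt

    ds = yt.load("MOOSE_sample_data/out.e-s010")
    plot = yt.LinePlot(ds, ("connect1", "diffused"),
                       [0.25, 0.0, 0.0], [0.25, 1.0, 0.0], 100)
    plot.save()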
diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -50,7 +50,7 @@
 
 
 class CallbackWrapper(object):
-    def __init__(self, viewer, window_plot, frb, field, font_properties, 
+    def __init__(self, viewer, window_plot, frb, field, font_properties,
                  font_color):
         self.frb = frb
         self.data = frb.data_source
@@ -86,6 +86,8 @@
         import matplotlib.figure
         self._plot_valid = True
         if figure is None:
+            if not iterable(fsize):
+                fsize = (fsize, fsize)
             self.figure = matplotlib.figure.Figure(figsize=fsize, frameon=True)
         else:
             figure.set_size_inches(fsize)
@@ -164,6 +166,8 @@
     def _get_labels(self):
         ax = self.axes
         labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
+        labels += ax.xaxis.get_minorticklabels()
+        labels += ax.yaxis.get_minorticklabels()
         labels += [ax.title, ax.xaxis.label, ax.yaxis.label,
                    ax.xaxis.get_offset_text(), ax.yaxis.get_offset_text()]
         return labels
@@ -279,10 +283,10 @@
         x_frac_widths = xbins/size[0]
         y_frac_widths = ybins/size[1]
 
-        # axrect is the rectangle defining the area of the 
-        # axis object of the plot.  Its range goes from 0 to 1 in 
-        # x and y directions.  The first two values are the x,y 
-        # start values of the axis object (lower left corner), and the 
+        # axrect is the rectangle defining the area of the
+        # axis object of the plot.  Its range goes from 0 to 1 in
+        # x and y directions.  The first two values are the x,y
+        # start values of the axis object (lower left corner), and the
         # second two values are the size of the axis object.  To get
         # the upper right corner, add the first x,y to the second x,y.
         axrect = (
@@ -452,5 +456,3 @@
             ax.clear()
             cbars.append(ax)
     return fig, tr, cbars
-
-

diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/visualization/fits_image.py
--- a/yt/visualization/fits_image.py
+++ b/yt/visualization/fits_image.py
@@ -11,6 +11,7 @@
 #-----------------------------------------------------------------------------
 from yt.extern.six import string_types
 import numpy as np
+from yt.fields.derived_field import DerivedField
 from yt.funcs import mylog, iterable, fix_axis, ensure_list
 from yt.visualization.fixed_resolution import FixedResolutionBuffer
 from yt.data_objects.construction_data_containers import YTCoveringGrid
@@ -147,25 +148,27 @@
         for fd in fields:
             if isinstance(fd, tuple):
                 self.fields.append(fd[1])
+            elif isinstance(fd, DerivedField):
+                self.fields.append(fd.name[1])
             else:
                 self.fields.append(fd)
 
         first = True
-        for key in fields:
-            if key not in exclude_fields:
-                if hasattr(img_data[key], "units"):
-                    self.field_units[key] = str(img_data[key].units)
+        for name, field in zip(self.fields, fields):
+            if name not in exclude_fields:
+                if hasattr(img_data[field], "units"):
+                    self.field_units[name] = str(img_data[field].units)
                 else:
-                    self.field_units[key] = "dimensionless"
-                mylog.info("Making a FITS image of field %s" % key)
+                    self.field_units[name] = "dimensionless"
+                mylog.info("Making a FITS image of field %s" % name)
                 if first:
-                    hdu = _astropy.pyfits.PrimaryHDU(np.array(img_data[key]))
+                    hdu = _astropy.pyfits.PrimaryHDU(np.array(img_data[field]))
                     first = False
                 else:
-                    hdu = _astropy.pyfits.ImageHDU(np.array(img_data[key]))
-                hdu.name = key
-                hdu.header["btype"] = key
-                hdu.header["bunit"] = re.sub('()', '', self.field_units[key])
+                    hdu = _astropy.pyfits.ImageHDU(np.array(img_data[field]))
+                hdu.name = name
+                hdu.header["btype"] = name
+                hdu.header["bunit"] = re.sub('()', '', self.field_units[name])
                 self.hdulist.append(hdu)
 
         self.shape = self.hdulist[0].shape

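The practical effect is that DerivedField objects can now be passed
wherever a field name or tuple was accepted; a sketch (the ds.fields
attribute access is an assumption and may vary by yt version):

    import yt
    from yt.visualization.fits_image import FITSImageData

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    frb = ds.slice("z", 0.5).to_frb((1.0, "Mpc"), 512)
    # a DerivedField, a (ftype, fname) tuple and a bare name all work
    fid = FITSImageData(frb, fields=[ds.fields.gas.density])
    fid.writeto("density.fits", overwrite=True)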
diff -r 02e1ca816bd4ef886612fd3f926561e916c43bfa -r e9c2bc5762e60286c81240db2896046e91f96009 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -30,7 +30,6 @@
 
 import numpy as np
 import weakref
-import re
 import types
 
 class FixedResolutionBuffer(object):
@@ -155,38 +154,6 @@
             if f not in exclude and f[0] not in self.data_source.ds.particle_types:
                 self[f]
 
-    def _is_ion( self, fname ):
-        p = re.compile("_p[0-9]+_")
-        result = False
-        if p.search( fname ) is not None:
-            result = True
-        return result
-
-    def _ion_to_label( self, fname ):
-        pnum2rom = {
-            "0":"I", "1":"II", "2":"III", "3":"IV", "4":"V",
-            "5":"VI", "6":"VII", "7":"VIII", "8":"IX", "9":"X",
-            "10":"XI", "11":"XII", "12":"XIII", "13":"XIV", "14":"XV",
-            "15":"XVI", "16":"XVII", "17":"XVIII", "18":"XIX", "19":"XX"}
-
-        p = re.compile("_p[0-9]+_")
-        m = p.search( fname )
-        if m is not None:
-            pstr = m.string[m.start()+1:m.end()-1]
-            segments = fname.split("_")
-            for i,s in enumerate(segments):
-                segments[i] = s.capitalize()
-                if s == pstr:
-                    ipstr = i
-            element = segments[ipstr-1]
-            roman = pnum2rom[pstr[1:]]
-            label = element + '\ ' + roman + '\ ' + \
-                '\ '.join(segments[ipstr+1:])
-        else:
-            label = fname
-        return label
-
-
     def _get_info(self, item):
         info = {}
         ftype, fname = field = self.data_source._determine_fields(item)[0]
@@ -210,18 +177,7 @@
         except AttributeError:
             pass
 
-        info['label'] = finfo.display_name
-        if info['label'] is None:
-            if self._is_ion( fname ):
-                fname = self._ion_to_label( fname )
-                info['label'] = r'$\rm{'+fname+r'}$'
-                info['label'] = r'$\rm{'+fname.replace('_','\ ')+r'}$'
-            else:
-                info['label'] = r'$\rm{'+fname+r'}$'
-                info['label'] = r'$\rm{'+fname.replace('_','\ ').title()+r'}$'
-        elif info['label'].find('$') == -1:
-            info['label'] = info['label'].replace(' ','\ ')
-            info['label'] = r'$\rm{'+info['label']+r'}$'
+        info['label'] = finfo.get_latex_display_name()
 
         return info
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/6a9dc5be6bfe/
Changeset:   6a9dc5be6bfe
User:        ngoldbaum
Date:        2017-07-17 21:35:42+00:00
Summary:     Merge pull request #1479 from cphyc/feature/force-cosmo

Feature/force cosmo
Affected #:  3 files

diff -r 91d4c5f842e13395f9f6357b996def74a8823687 -r 6a9dc5be6bfe4ad5611c291ccccc0d362378571d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1947,6 +1947,11 @@
 ``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
 based on the presence of an ``info_rt_*.txt`` file in the output directory.
 
+It is possible to force yt to treat a simulation as cosmological by
+passing the ``cosmological=True`` parameter (or ``False`` to force a
+non-cosmological run). If left as ``None``, the kind of simulation is
+inferred from the data.
+
 .. _loading-sph-data:
 
 SPH Particle Data

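Concretely, the new parameter reads like this (the output path is the
cosmological sample exercised by the tests below):

    import yt

    # force a non-cosmological interpretation of a RAMSES output
    ds = yt.load("output_00080/info_00080.txt", cosmological=False)

    # or leave it as None and let the heuristic on time, H0 and aexp decide
    ds = yt.load("output_00080/info_00080.txt", cosmological=None)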
diff -r 91d4c5f842e13395f9f6357b996def74a8823687 -r 6a9dc5be6bfe4ad5611c291ccccc0d362378571d yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -158,15 +158,15 @@
         self.local_particle_count = hvals['npart']
 
         particle_fields = [
-            ("particle_position_x", "d"),
-            ("particle_position_y", "d"),
-            ("particle_position_z", "d"),
-            ("particle_velocity_x", "d"),
-            ("particle_velocity_y", "d"),
-            ("particle_velocity_z", "d"),
-            ("particle_mass", "d"),
-            ("particle_identifier", "i"),
-            ("particle_refinement_level", "I")]
+                ("particle_position_x", "d"),
+                ("particle_position_y", "d"),
+                ("particle_position_z", "d"),
+                ("particle_velocity_x", "d"),
+                ("particle_velocity_y", "d"),
+                ("particle_velocity_z", "d"),
+                ("particle_mass", "d"),
+                ("particle_identifier", "i"),
+                ("particle_refinement_level", "I")]
         if hvals["nstar_tot"] > 0:
             particle_fields += [("particle_age", "d"),
                                 ("particle_metallicity", "d")]
@@ -210,7 +210,7 @@
 
     def _read_amr(self):
         """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain
+           For each oct, only the position, index, level and domain 
            are needed - its position in the octree is found automatically.
            The most important is finding all the information to feed
            oct_handler.add
@@ -236,7 +236,7 @@
         min_level = self.ds.min_level
         # yt max level is not the same as the RAMSES one.
         # yt max level is the maximum number of additional refinement levels
-        # so for a uni grid run with no refinement, it would be 0.
+        # so for a uni grid run with no refinement, it would be 0. 
         # So we initially assume that.
         max_level = 0
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
@@ -373,7 +373,7 @@
             dsl.update(set(domain.particle_field_offsets.keys()))
         self.particle_field_list = list(dsl)
         self.field_list = [("ramses", f) for f in self.fluid_field_list] \
-                          + self.particle_field_list
+                        + self.particle_field_list
 
     def _setup_auto_fields(self):
         '''
@@ -498,9 +498,9 @@
         return {'io': npart}
 
     def print_stats(self):
-
+        
         # This function prints information based on the fluid on the grids,
-        # and therefore does not work for DM only runs.
+        # and therefore does not work for DM only runs. 
         if not self.fluid_field_list:
             print("This function is not implemented for DM only runs")
             return
@@ -540,11 +540,11 @@
     _index_class = RAMSESIndex
     _field_info_class = RAMSESFieldInfo
     gamma = 1.4 # This will get replaced on hydro_fn open
-
+    
     def __init__(self, filename, dataset_type='ramses',
-                 fields = None, storage_filename = None,
+                 fields=None, storage_filename=None,
                  units_override=None, unit_system="cgs",
-                 extra_particle_fields=None):
+                 extra_particle_fields=None, cosmological=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]
@@ -552,14 +552,18 @@
         fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
                 If set to None, will try a default set of fields
         extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file.
+        cosmological: If set to None, detect automatically whether the simulation
+                      is cosmological. If a boolean, force its value.
         '''
         self.fluid_types += ("ramses",)
         self._fields_in_file = fields
         self._extra_particle_fields = extra_particle_fields
+        self.force_cosmological = cosmological
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
         self.storage_filename = storage_filename
 
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
 
@@ -574,7 +578,7 @@
         time_unit = self.parameters['unit_t']
 
         # calculating derived units (except velocity and temperature, done below)
-        mass_unit = density_unit * length_unit**3
+        mass_unit = density_unit * length_unit**3     
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))
         pressure_unit = density_unit * (length_unit / time_unit)**2
@@ -644,8 +648,16 @@
         self.domain_right_edge = np.ones(3, dtype='float64')
         # This is likely not true, but it's not clear how to determine the boundary conditions
         self.periodicity = (True, True, True)
-        # These conditions seem to always be true for non-cosmological datasets
-        if rheader["time"] >= 0 and rheader["H0"] == 1 and rheader["aexp"] == 1:
+
+        if self.force_cosmological is not None:
+            is_cosmological = self.force_cosmological
+        else:
+            # These conditions seem to always be true for non-cosmological datasets
+            is_cosmological = not (rheader["time"] >= 0 and
+                                   rheader["H0"] == 1 and
+                                   rheader["aexp"] == 1)
+
+        if not is_cosmological:
             self.cosmological_simulation = 0
             self.current_redshift = 0
             self.hubble_constant = 0
@@ -673,7 +685,7 @@
 
             self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
                              self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
-
+ 
             self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
 
 

diff -r 91d4c5f842e13395f9f6357b996def74a8823687 -r 6a9dc5be6bfe4ad5611c291ccccc0d362378571d yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -62,14 +62,54 @@
 
 ramsesNonCosmo = 'DICEGalaxyDisk_nonCosmological/output_00002'
 @requires_file(ramsesNonCosmo)
+def test_non_cosmo_detection():
+    path = os.path.join(ramsesNonCosmo, 'info_00002.txt')
+    ds = yt.load(path, cosmological=False)
+    assert_equal(ds.cosmological_simulation, 0)
+
+    ds = yt.load(path, cosmological=None)
+    assert_equal(ds.cosmological_simulation, 0)
+
+    ds = yt.load(path)
+    assert_equal(ds.cosmological_simulation, 0)
+
+
+@requires_file(ramsesNonCosmo)
 def test_unit_non_cosmo():
-    ds = yt.load(os.path.join(ramsesNonCosmo, 'info_00002.txt'))
+    for force_cosmo in [False, None]:
+        ds = yt.load(os.path.join(ramsesNonCosmo, 'info_00002.txt'), cosmological=force_cosmo)
+
+        expected_raw_time = 0.0299468077820411 # in ramses unit
+        assert_equal(ds.current_time.value, expected_raw_time)
+
+        expected_time = 14087886140997.336 # in seconds
+        assert_equal(ds.current_time.in_units('s').value, expected_time)
+
 
-    expected_raw_time = 0.0299468077820411 # in ramses unit
-    assert_equal(ds.current_time.value, expected_raw_time)
+ramsesCosmo = 'output_00080/info_00080.txt'
+@requires_file(ramsesCosmo)
+def test_cosmo_detection():
+    ds = yt.load(ramsesCosmo, cosmological=True)
+    assert_equal(ds.cosmological_simulation, 1)
+
+    ds = yt.load(ramsesCosmo, cosmological=None)
+    assert_equal(ds.cosmological_simulation, 1)
+
+    ds = yt.load(ramsesCosmo)
+    assert_equal(ds.cosmological_simulation, 1)
 
-    expected_time = 14087886140997.336 # in seconds
-    assert_equal(ds.current_time.in_units('s').value, expected_time)
+
+@requires_file(ramsesCosmo)
+def test_unit_cosmo():
+    for force_cosmo in [True, None]:
+        ds = yt.load(ramsesCosmo, cosmological=force_cosmo)
+
+        expected_raw_time = 1.119216564055017 # in ramses unit
+        assert_equal(ds.current_time.value, expected_raw_time)
+
+        expected_time = 3.756241729312462e+17 # in seconds
+        assert_equal(ds.current_time.in_units('s').value, expected_time)
+
 
 ramsesExtraFieldsSmall = 'ramses_extra_fields_small/output_00001'
 @requires_file(ramsesExtraFieldsSmall)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

