[yt-svn] commit/yt: 2 new changesets

commits-noreply at bitbucket.org
Sat Apr 19 17:14:46 PDT 2014


2 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/6fa87627d9be/
Changeset:   6fa87627d9be
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-16 21:57:35
Summary:     Merging into species plugin
Affected #:  98 files

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,3 +5160,4 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,4 +7,9 @@
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
 prune doc/build/
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn
 recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -15,8 +15,13 @@
     required_arguments = 1
     optional_arguments = 1
     option_spec = {'skip_exceptions' : directives.flag}
+    final_argument_whitespace = True
 
-    def run(self):
+    def run(self): # check if there are spaces in the notebook name
+        nb_path = self.arguments[0]
+        if ' ' in nb_path: raise ValueError(
+            "Due to issues with docutils stripping spaces from links, white "
+            "space is not allowed in notebook filenames '{0}'".format(nb_path))
         # check if raw html is supported
         if not self.state.document.settings.raw_enabled:
             raise self.warning('"%s" directive disabled.' % self.name)
@@ -24,10 +29,11 @@
         # get path to notebook
         source_dir = os.path.dirname(
             os.path.abspath(self.state.document.current_source))
-        nb_basename = os.path.basename(self.arguments[0])
+        nb_filename = self.arguments[0]
+        nb_basename = os.path.basename(nb_filename)
         rst_file = self.state_machine.document.attributes['source']
         rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.join(rst_dir, nb_basename)
+        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
 
         # Move files around.
         rel_dir = os.path.relpath(rst_dir, setup.confdir)
@@ -89,7 +95,6 @@
         return [nb_node]
 
 
-
 class notebook_node(nodes.raw):
     pass
 
@@ -109,6 +114,7 @@
     # http://imgur.com/eR9bMRH
     header = header.replace('<style', '<style scoped="scoped"')
     header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n', '')
+    header = header.replace("code,pre{", "code{")
 
     # Filter out styles that conflict with the sphinx theme.
     filter_strings = [
@@ -120,8 +126,16 @@
     ]
     filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
 
+    line_begin_strings = [
+        'pre{',
+        'p{margin'
+        ]
+
     header_lines = filter(
         lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
+    header_lines = filter(
+        lambda x: not any([x.startswith(s) for s in line_begin_strings]), header_lines)
+
     header = '\n'.join(header_lines)
 
     # concatenate raw html lines

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:52f186664831f5290b31ec433114927b9771e224bd79d0c82dd3d9a8d9c09bf6"
+  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -307,7 +307,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`:"
+      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`.  You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity."
      ]
     },
     {
@@ -402,11 +402,13 @@
       "\n",
       "print a/b\n",
       "print (a/b).in_cgs()\n",
+      "print (a/b).in_mks()\n",
       "print (a/b).in_units('km/s')\n",
       "print ''\n",
       "\n",
       "print a*b\n",
-      "print (a*b).in_cgs()"
+      "print (a*b).in_cgs()\n",
+      "print (a*b).in_mks()"
      ],
      "language": "python",
      "metadata": {},
@@ -433,7 +435,10 @@
       "from yt.utilities.physical_constants import G, kboltz\n",
       "\n",
       "print \"Newton's constant: \", G\n",
-      "print \"Boltzmann constant: \", kboltz"
+      "print \"Newton's constant in MKS: \", G.in_mks(), \"\\n\"\n",
+      "\n",
+      "print \"Boltzmann constant: \", kboltz\n",
+      "print \"Boltzmann constant in MKS: \", kboltz.in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8e1a5db9e3869bcf761ff39c5a95d21458b7c4205f00da3d3f973d398422a466"
+  "signature": "sha256:9e7ac626b3609cf5f3fb2d4ebc6e027ed923ab1c22f0acc212e42fc7535e3205"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -73,6 +73,7 @@
       "mass = dd['cell_mass']\n",
       "\n",
       "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
+      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
       "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
       "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
      ],
@@ -87,6 +88,7 @@
       "dx = dd['dx']\n",
       "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
       "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
+      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
       "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
      ],
      "language": "python",
@@ -109,8 +111,10 @@
       "\n",
       "* `in_units`\n",
       "* `in_cgs`\n",
+      "* `in_mks`\n",
       "* `convert_to_units`\n",
-      "* `convert_to_cgs`"
+      "* `convert_to_cgs`\n",
+      "* `convert_to_mks`"
      ]
     },
     {
@@ -134,15 +138,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The second, `in_cgs`, returns a copy of the array converted into the base units of yt's CGS unit system:"
+      "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print (dd['pressure']/dd['density'])\n",
-      "print (dd['pressure']/dd['density']).in_cgs()"
+      "print (dd['pressure'])\n",
+      "print (dd['pressure']).in_cgs()\n",
+      "print (dd['pressure']).in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:a07224c25b1d938bc1014b6d9d09c1a2392912f21b821b07615e65302677ef9b"
+  "signature": "sha256:242d7005d45a82744713bfe6389e49d47f39b524d1e7fcbf5ceb2e65dc473e68"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,77 +20,6 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "The unit registry"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "When a dataset is loaded, we attempt to detect and assign conversion factors from the internal simulation coordinate system and the physical CGS system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.mods import *\n",
-      "\n",
-      "ds = load('Enzo_64/DD0043/data0043')\n",
-      "\n",
-      "ds.unit_registry"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.unit_registry.lut"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "\n",
-      "It is not necessary to specify a unit registry when creating a new `YTArray` or `YTQuantity` since `yt` ships with a default unit registry:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units.unit_object import default_unit_registry as reg\n",
-      "\n",
-      "unit_names = reg.lut.keys()\n",
-      "unit_names.sort()\n",
-      "\n",
-      "# Print out the first 10 unit names\n",
-      "for i in range(10):\n",
-      "    print unit_names[i], reg.lut[unit_names[i]]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Each entry in the lookup table is the string name of a base unit and a tuple containing the CGS conversion factor and dimensions of the unit symbol."
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
       "Code units"
      ]
     },
@@ -98,25 +27,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Some of the most interesting unit symbols are the ones for \"code\" units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "code_unit_names = [un for un in unit_names if 'code_' in un]\n",
-      "\n",
-      "print code_unit_names"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:"
      ]
     },
@@ -132,13 +42,22 @@
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object.  Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
       "reg = ds.unit_registry\n",
       "\n",
-      "for un in code_unit_names:\n",
-      "    print un, reg.lut[un]"
+      "for un in reg.keys():\n",
+      "    if un.startswith('code_'):\n",
+      "        fmt_tup = (un, reg.lut[un][0], reg.lut[un][1])\n",
+      "        print \"Unit name:      {:<15}\\nCGS conversion: {:<15}\\nDimensions:     {:<15}\\n\".format(*fmt_tup)"
      ],
      "language": "python",
      "metadata": {},
@@ -295,6 +214,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "The unit registry"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When you create a `YTArray` without referring to a unit registry, `yt` uses the default unit registry, which does not include code units or comoving units."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = YTQuantity(3, 'cm')\n",
+      "\n",
+      "print a.units.registry.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When a dataset is loaded, `yt` infers conversion factors from the internal simulation unit system to the CGS unit system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols.  For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n",
+      "\n",
+      "* `ds.arr()`\n",
+      "* `ds.quan()`\n",
+      "\n",
+      "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units.  For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = ds.quan(3, 'code_length')\n",
+      "\n",
+      "print a\n",
+      "print a.in_cgs()\n",
+      "print a.in_units('Mpccm/h')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "b = ds.arr([3, 4, 5], 'Mpccm/h')\n",
+      "print b\n",
+      "print b.in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -59,7 +59,7 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'yt'
+project = u'The yt Project'
 copyright = u'2013, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -119,11 +119,16 @@
 # documentation.
 html_theme_options = dict(
     bootstrap_version = "3",
-    bootswatch_theme = "readable"
+    bootswatch_theme = "readable",
+    navbar_links = [
+        ("How to get help", "help/index"),
+        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Cookbook", "cookbook/index"),
+        ],
+    navbar_sidebarrel = False,
+    globaltoc_depth = 2,
 )
 
-#html_style = "agogo_yt.css"
-
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -43,7 +43,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # This looks better.  Now let's try turning on opacity.
@@ -56,7 +56,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v3.png", clip_ratio=6.0)
 
 # This looks pretty good, now lets go back to the full resolution AMRKDTree
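
This and several of the following cookbook fixes replace the retired `na`
alias for NumPy with the conventional `np`. The pattern, as a sketch:

import numpy as np  # formerly "import numpy as na" in older yt scripts

alpha = 10.0 * np.ones(4, dtype='float64')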

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -19,8 +19,8 @@
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**na.floor(na.log10(data_source[field]).min()  )
-c_max = 10**na.floor(na.log10(data_source[field]).max()+1)
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # keep only clumps with at least 20 cells
 function = 'self.data[\'%s\'].size > 20' % field

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/cookbook/multi_plot_slice_and_proj.py
--- a/doc/source/cookbook/multi_plot_slice_and_proj.py
+++ b/doc/source/cookbook/multi_plot_slice_and_proj.py
@@ -1,4 +1,5 @@
 from yt.mods import * # set up our namespace
+from yt.visualization.base_plot_types import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
 
@@ -18,7 +19,7 @@
 
 slc = pf.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
                  center=pf.domain_center)
-proj = pf.proj(2, "density", weight_field="density", center=pf.domain_center)
+proj = pf.proj("density", 2, weight_field="density", center=pf.domain_center)
 
 slc_frb = slc.to_frb((1.0, "mpc"), 512)
 proj_frb = proj.to_frb((1.0, "mpc"), 512)

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -31,4 +31,4 @@
 # relating to what our dataset is called.
 # We save the log of the values so that the colors do not span
 # many orders of magnitude.  Try it without and see what happens.
-write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+write_image(np.log10(image), "%s_offaxis_projection.png" % pf)

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -21,13 +21,13 @@
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
 cam.snapshot("v1.png", clip_ratio=6.0)
 
-# In this case, the default alphas used (na.logspace(-3,0,Nbins)) does not
+# In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.logspace(0,0,4), colormap = 'RdBu_r')
+        alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
@@ -40,14 +40,14 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v4.png", clip_ratio=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=30.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v5.png", clip_ratio=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
@@ -55,7 +55,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=100.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v6.png", clip_ratio=6.0)
 
 # That is very opaque!  Now lets go back and see what it would look like with

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -12,7 +12,7 @@
 
 # Create a transfer function to map field values to colors.
 # We bump up our minimum to cut out some of the background fluid
-tf = ColorTransferFunction((na.log10(mi)+2.0, na.log10(ma)))
+tf = ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
 
 # Add three guassians, evenly spaced between the min and
 # max specified above with widths of 0.02 and using the

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ b/doc/source/cookbook/save_profiles.py
@@ -33,7 +33,7 @@
 # separate columns into separate NumPy arrays, it is essential to set unpack=True.
 
 r, dens, std_dens, temp, std_temp = \
-	na.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
+	np.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
 
 fig1 = plt.figure()
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/cookbook/simple_slice_matplotlib_example.py
--- a/doc/source/cookbook/simple_slice_matplotlib_example.py
+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py
@@ -21,7 +21,7 @@
 rect = (0.2,0.2,0.2,0.2)
 new_ax = fig.add_axes(rect)
 
-n, bins, patches = new_ax.hist(na.random.randn(1000)+20, 50,
+n, bins, patches = new_ax.hist(np.random.randn(1000)+20, 50,
     facecolor='yellow', edgecolor='yellow')
 new_ax.set_xlabel('Dinosaurs per furlong')
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -379,7 +379,7 @@
    something_else``.  Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do ``a =
    a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-   ``na.multiply(a, 3, a)``.
+   ``np.multiply(a, 3, a)``.
  * In general, avoid all double-underscore method names: ``__something`` is
    usually unnecessary.
  * Doc strings should describe input, output, behavior, and any state changes

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -12,7 +12,7 @@
 computation engine, Matplotlib for some visualization tasks and Mercurial for
 version control.  Because installation of all of these interlocking parts can 
 be time-consuming, yt provides an installation script which downloads and builds
-a fully-isolated Python + Numpy + Matplotlib + HDF5 + Mercurial installation.  
+a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
 other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
 supported.
@@ -86,16 +86,41 @@
 Alternative Installation Methods
 --------------------------------
 
-If you want to forego the use of the install script, you need to make sure 
-you have yt's dependencies installed on your system.  These include: a C compiler, 
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``numpy``, and 
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``)
-to install yt as:
+If you want to forego the use of the install script, you need to make sure you
+have yt's dependencies installed on your system.  These include: a C compiler,
+``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
+``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
+install yt as:
 
 .. code-block:: bash
 
   $ pip install yt
 
+The source code for yt may be found at the Bitbucket project site and can also
+be used for installation. If you prefer to install from source instead of
+relying on external tools, you will need ``mercurial`` to clone the official repo:
+
+.. code-block:: bash
+
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user
+
+This will install yt into ``$HOME/.local/lib64/python2.7/site-packages``.
+Please refer to the ``setuptools`` documentation for additional options.
+
+Provided that the required dependencies are in a predictable location, yt should
+be able to find them automatically. However, you can manually specify the prefix
+used for the installation of ``HDF5``, ``Freetype`` and ``libpng`` via ``hdf5.cfg``,
+``freetype.cfg``, ``png.cfg``, or by setting the ``HDF5_DIR``, ``FTYPE_DIR``,
+``PNG_DIR`` environment variables, respectively, e.g.
+
+.. code-block:: bash
+
+  $ echo '/usr/local' > hdf5.cfg
+  $ export FTYPE_DIR=/opt/freetype
+
 If you choose this installation method, you do not need to run the activation
 script as it is unnecessary.
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -709,8 +709,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ConstructedRootGrid
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ExtractedHierarchy
 
 
 Testing Infrastructure

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:d75e416150ccb017cfdf89973f8d4463e780da4d9bdc9a3783001d22021d9081"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -154,7 +155,7 @@
       "Npixels = 512 \n",
       "cam = pf.h.camera(c, L, W, Npixels, tfh.tf, fields=['temperature'],\n",
       "                  north_vector=[1.,0.,0.], steady_north=True, \n",
-      "                  sub_samples=5, no_ghost=False, l_max=0)\n",
+      "                  sub_samples=5, no_ghost=False)\n",
       "\n",
       "# Here we substitute the TransferFunction we constructed earlier.\n",
       "cam.transfer_function = tfh.tf\n",

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -32,8 +32,8 @@
    data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                            (8., 'kpc'), (1., 'kpc'))
 
-   c_min = 10**na.floor(na.log10(data_source['density']).min()  )
-   c_max = 10**na.floor(na.log10(data_source['density']).max()+1)
+   c_min = 10**np.floor(np.log10(data_source['density']).min()  )
+   c_max = 10**np.floor(np.log10(data_source['density']).max()+1)
 
    function = 'self.data[\'Density\'].size > 20'
    master_clump = Clump(data_source, None, 'density', function=function)

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -254,7 +254,7 @@
    c = [0.5, 0.5, 0.5]
    N = 512
    image = off_axis_projection(pf, c, L, W, N, "density")
-   write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+   write_image(np.log10(image), "%s_offaxis_projection.png" % pf)
 
 Here, ``W`` is the width of the projection in the x, y, *and* z
 directions.

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -4,10 +4,9 @@
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
 
-
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
-        minError=1E-5, complexLim=.999,
-        fitLim=.99, minLength=3, 
+        minError=1E-4, complexLim=.995,
+        fitLim=.97, minLength=3, 
         maxLength=1000, splitLim=.99,
         output_file=None):
 
@@ -90,6 +89,7 @@
     fluxData[0]=1
     fluxData[-1]=1
 
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -111,6 +111,7 @@
             yDatBounded=fluxData[b[1]:b[2]]
             yFitBounded=yFit[b[1]:b[2]]
 
+
             #Find init redshift
             z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
 
@@ -121,24 +122,33 @@
 
             #Fit Using complex tools
             newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
-                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+                    z,fitLim,minError,speciesDict)
+
+            #If flagged as a bad fit, species is lyman alpha,
+            #   and it may be a saturated line, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+               newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        minSize,minError)
+
+            if na.size(newLinesP)> 0:
+
+                #Check for EXPLOOOOSIIONNNSSS
+                newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
+
 
             #Check existence of partner lines if applicable
             if len(speciesDict['wavelength']) != 1:
                 newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                        b, minError*(b[2]-b[1]),
-                        x0, xRes, speciesDict)
+                        b, minError, x0, xRes, speciesDict)
 
-            #If flagged as a bad fit, species is lyman alpha,
-            #   and it may be a saturated line, use special tools
-            if flag and species=='lya' and min(yDatBounded)<.1:
-                newLinesP=_large_flag_fit(xBounded,yDatBounded,
-                        yFitBounded,z,speciesDict,
-                        minSize,minError*(b[2]-b[1]))
+
+
 
             #Adjust total current fit
             yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
 
+
             #Add new group to all fitted lines
             if na.size(newLinesP)>0:
                 speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
@@ -149,6 +159,7 @@
 
         allSpeciesLines[species]=speciesLines
 
+
     if output_file:
         _output_fit(allSpeciesLines, output_file)
 
@@ -205,10 +216,12 @@
     #Setup initial line guesses
     if initP==None: #Regular fit
         initP = [0,0,0] 
-        if min(yDat)<.5: #Large lines get larger initial guess 
-            initP[0] = 10**16
+        if min(yDat)<.01: #Large lines get larger initial guess 
+            initP[0] = speciesDict['init_N']*10**2
+        elif min(yDat)<.5:
+            initP[0] = speciesDict['init_N']*10**1
         elif min(yDat)>.9: #Small lines get smaller initial guess
-            initP[0] = 10**12.5
+            initP[0] = speciesDict['init_N']*10**-1
         else:
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
@@ -225,9 +238,16 @@
         return [],False
     
     #Values to proceed through first run
-    errSq,prevErrSq=1,1000
+    errSq,prevErrSq,prevLinesP=1,10*len(x),[]
 
+    if errBound == None:
+        errBound = len(yDat)*(max(1-yDat)*1E-2)**2
+    else:
+        errBound = errBound*len(yDat)
+
+    flag = False
     while True:
+
         #Initial parameter guess from joining parameters from all lines
         #   in lines into a single array
         initP = linesP.flatten()
@@ -237,6 +257,7 @@
                 args=(x,yDat,yFit,speciesDict),
                 epsfcn=1E-10,maxfev=1000)
 
+
         #Set results of optimization
         linesP = na.reshape(fitP,(-1,3))
 
@@ -247,17 +268,23 @@
         #Sum to get idea of goodness of fit
         errSq=sum(dif**2)
 
+        if any(linesP[:,1]==speciesDict['init_b']):
+         #   linesP = prevLinesP
+
+            flag = True
+            break
+            
         #If good enough, break
-        if errSq < errBound: 
+        if errSq < errBound:        
             break
 
         #If last fit was worse, reject the last line and revert to last fit
-        if errSq > prevErrSq*10:
+        if errSq > prevErrSq*10 :
             #If its still pretty damn bad, cut losses and try flag fit tools
             if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                linesP = prevLinesP
                 break
 
         #If too many lines 
@@ -266,21 +293,26 @@
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                break 
+                flag = True
+                break
 
         #Store previous data in case reject next fit
         prevErrSq = errSq
         prevLinesP = linesP
 
-
         #Set up initial condition for new line
         newP = [0,0,0] 
-        if min(dif)<.1:
-            newP[0]=10**12
-        elif min(dif)>.9:
-            newP[0]=10**16
+
+        yAdjusted = 1+yFit*yNewFit-yDat
+ 
+        if min(yAdjusted)<.01: #Large lines get larger initial guess 
+            newP[0] = speciesDict['init_N']*10**2
+        elif min(yAdjusted)<.5:
+            newP[0] = speciesDict['init_N']*10**1
+        elif min(yAdjusted)>.9: #Small lines get smaller initial guess
+            newP[0] = speciesDict['init_N']*10**-1
         else:
-            newP[0]=10**14
+            newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
         linesP=na.append(linesP,[newP],axis=0)
@@ -290,12 +322,12 @@
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict)
+        check=_check_params(na.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
     linesP = na.delete(linesP,remove,axis=0)
 
-    return linesP,False
+    return linesP,flag
 
 def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
     """
@@ -489,6 +521,9 @@
     #List of lines to remove
     removeLines=[]
 
+    #Set error
+
+
     #Iterate through all sets of line parameters
     for i,p in enumerate(linesP):
 
@@ -501,16 +536,23 @@
             lb = _get_bounds(p[2],b,wl,x0,xRes)
             xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
 
+            if errBound == None:
+                errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
+            else:
+                errBound = 10*errBound*len(yb)
+
             #Generate a fit and find the difference to data
             yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
             dif =yb-yFitb
 
+
+
             #Only counts as an error if line is too big ---------------<
             dif = [k for k in dif if k>0]
             err = sum(dif)
 
             #If the fit is too bad then add the line to list of removed lines
-            if err > errBound*1E2:
+            if err > errBound:
                 removeLines.append(i)
                 break
 
@@ -640,21 +682,13 @@
         #Check if the region needs to be divided
         if b[2]-b[1]>maxLength:
 
-            #Find the minimum absorption in the middle two quartiles of
-            #   the large complex
-            q=(b[2]-b[1])/4
-            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+            split = _split_region(yDat,b,splitLim)
 
-            #Only break it up if the minimum absorption is actually low enough
-            if yDat[cut]>splitLim:
-
-                #Get the new two peaks
-                b1Peak = yDat[b[1]:cut].argmin()+b[1]
-                b2Peak = yDat[cut:b[2]].argmin()+cut
+            if split:
 
                 #add the two regions separately
-                cBounds.insert(i+1,[b1Peak,b[1],cut])
-                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+                cBounds.insert(i+1,split[0])
+                cBounds.insert(i+2,split[1])
 
                 #Remove the original region
                 cBounds.pop(i)
@@ -663,7 +697,33 @@
 
     return cBounds
 
-def _gen_flux_lines(x, linesP, speciesDict):
+
+def _split_region(yDat,b,splitLim):
+    #Find the minimum absorption in the middle two quartiles of
+    #   the large complex
+
+    q=(b[2]-b[1])/4
+    cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+    #Only break it up if the minimum absorption is actually low enough
+    if yDat[cut]>splitLim:
+
+        #Get the new two peaks
+        b1Peak = yDat[b[1]:cut].argmin()+b[1]
+        b2Peak = yDat[cut:b[2]].argmin()+cut
+
+        region_1 = [b1Peak,b[1],cut]
+        region_2 = [b2Peak,cut,b[2]]
+
+        return [region_1,region_2]
+
+    else:
+
+        return []
+
+
+
+def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
     """
     Calculates the normalized flux for a region of wavelength space
     generated by a set of absorption lines.
@@ -692,6 +752,9 @@
             g=speciesDict['Gamma'][i]
             wl=speciesDict['wavelength'][i]
             y = y+ _gen_tau(x,p,f,g,wl)
+            if firstLine: 
+                break
+
     flux = na.exp(-y)
     return flux
 
@@ -744,21 +807,25 @@
         the difference between the fit generated by the parameters
         given in pTotal multiplied by the previous fit and the desired
         flux profile, w/ first index modified appropriately for bad 
-        parameter choices
+        parameter choices and an additional penalty for fitting with a lower
+        flux than observed.
     """
 
     pTotal.shape = (-1,3)
     yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
 
     error = yDat-yFit*yNewFit
-    error[0] = _check_params(pTotal,speciesDict)
+    error_plus = (yDat-yFit*yNewFit).clip(min=0)
+
+    error = error+error_plus
+    error[0] = _check_params(pTotal,speciesDict,x)
 
     return error
 
-def _check_params(p, speciesDict):
+def _check_params(p, speciesDict,xb):
     """
     Check to see if any of the parameters in p fall outside the range 
-        given in speciesDict.
+        given in speciesDict, or fall on the boundaries.
 
     Parameters
     ----------
@@ -767,6 +834,8 @@
     speciesDict : dictionary
         dictionary with properties giving the max and min
         values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
 
     Returns
     -------
@@ -774,16 +843,137 @@
         0 if all values are fine
         999 if any values fall outside acceptable range
     """
+
+    minz = (xb[0])/speciesDict['wavelength'][0]-1
+    maxz = (xb[-1])/speciesDict['wavelength'][0]-1
+
     check = 0
-    if any(p[:,0] > speciesDict['maxN']) or\
-          any(p[:,0] < speciesDict['minN']) or\
-          any(p[:,1] > speciesDict['maxb']) or\
-          any(p[:,1] < speciesDict['minb']) or\
-          any(p[:,2] > speciesDict['maxz']) or\
-          any(p[:,2] < speciesDict['minz']):
+    if any(p[:,0] >= speciesDict['maxN']) or\
+          any(p[:,0] <= speciesDict['minN']) or\
+          any(p[:,1] >= speciesDict['maxb']) or\
+          any(p[:,1] <= speciesDict['minb']) or\
+          any(p[:,2] >= maxz) or\
+          any(p[:,2] <= minz):
               check = 999
+              
     return check
 
+def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
+
+    """
+    Check to see if any of the parameters in p are the
+    same as the initial parameters and if so, attempt to
+    split the region and refit it.
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    """
+
+    # Check if anything is a default parameter
+    if any(p[:,0] == speciesDict['init_N']) or\
+          any(p[:,0] == speciesDict['init_N']*10) or\
+          any(p[:,0] == speciesDict['init_N']*100) or\
+          any(p[:,0] == speciesDict['init_N']*.1) or\
+          any(p[:,1] == speciesDict['init_b']) or\
+          any(p[:,1] == speciesDict['maxb']):
+
+            # These are the initial bounds
+            init_bounds = [yDat.argmin(),0,len(xb)-1]
+
+            # Gratuitous limit for splitting region
+            newSplitLim = 1 - (1-min(yDat))*.5
+
+            # Attempt to split region
+            split = _split_region(yDat,init_bounds,newSplitLim)
+            
+            # If we can't split it, just reject it. It's unphysical
+            # to just keep the default parameters and we're out of
+            # options at this point
+            if not split:
+                return []
+
+            # Else set up the bounds for each region and fit separately
+            b1,b2 = split[0][2], split[1][1]
+
+            p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
+                            initz, minSize, errorBound, speciesDict)
+
+            p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
+                            initz, minSize, errorBound, speciesDict)
+
+            # Make the final line parameters. It's annoying because
+            # one or both regions may have fit to nothing
+            if na.size(p1)> 0 and na.size(p2)>0:
+                p = na.r_[p1,p2]
+            elif na.size(p1) > 0:
+                p = p1
+            else:
+                p = p2
+
+    return p
+
+
+def _check_numerical_instability(x, p, speciesDict,b):
+
+    """
+    Check to see if any of the parameters in p are causing
+    unstable numerical effects outside the region of fit
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    b : (3) list
+        list of integers indicating bounds of region fit in x
+    """
+
+    remove_lines = []
+
+
+    for i,line in enumerate(p):
+
+        # First check if the line is at risk for instability
+        if line[1]<5 or line[0] < 1E12:
+
+
+            # get all flux that isn't part of fit plus a little wiggle room
+            # max and min to prevent boundary errors
+
+            flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
+            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+
+            #Find regions that are absorbing outside the region we fit
+            flux_dif = 1 - flux
+            absorbing_coefficient = max(abs(flux_dif))
+
+
+            #Really there shouldn't be any absorption outside
+            #the region we fit, but we'll give some leeway.
+            #for high resolution spectra the tiny bits on the edges
+            #can give a non negligible amount of flux. Plus the errors
+            #we are looking for are HUGE.
+            if absorbing_coefficient > .1:
+
+                # we just set it to no fit because we've tried
+                # everything else at this point. this region just sucks :(
+                remove_lines.append(i)
+    
+    if remove_lines:
+        p = na.delete(p, remove_lines, axis=0)
+
+    return p
 
 def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
     """
@@ -815,4 +1005,5 @@
         f.create_dataset("{0}/z".format(ion),data=params['z'])
         f.create_dataset("{0}/complex".format(ion),data=params['group#'])
     print 'Writing spectrum fit to {0}'.format(file_name)
+    f.close()
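
A hedged sketch of driving `generate_total_fit` after these changes, assuming
`orderFits` is the ordered list of species keys to fit. The `speciesDicts`
schema is inferred from the keys this module references (`f`, `Gamma`,
`wavelength`, `init_N`, `init_b`, `maxN`, `minN`, `maxb`, `minb`, `name`);
the input arrays and parameter values are illustrative only:

import numpy as na  # this module still uses the old "na" alias
from yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit import \
    generate_total_fit

x = na.linspace(1200., 1400., 10000)  # wavelength grid (illustrative)
fluxData = na.ones(x.size)            # normalized flux to be fit

speciesDicts = {'lya': dict(name='HI lya', f=[.4164], Gamma=[6.265E8],
                            wavelength=[1215.67], init_N=1E14, init_b=20.,
                            maxN=1E22, minN=1E11, maxb=300., minb=1.)}

fit = generate_total_fit(x, fluxData, ['lya'], speciesDicts,
                         output_file='spectrum_fit.h5')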
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -63,12 +63,6 @@
     HaloProfiler, \
     FakeProfile
 
-from .hierarchy_subset.api import \
-    ConstructedRootGrid, \
-    AMRExtractedGridProxy, \
-    ExtractedHierarchy, \
-    ExtractedParameterFile
-
 from .level_sets.api import \
     identify_contours, \
     Clump, \

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -129,7 +129,7 @@
         """
         if self.CoM is not None:
             return self.CoM
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         c = {}
         # We shift into a box where the origin is the left edge
         c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
@@ -199,7 +199,7 @@
         """
         if self.group_total_mass is not None:
             return self.group_total_mass
-        return self["ParticleMassMsun"].sum()
+        return self["particle_mass"].in_units('Msun').sum()
 
     def bulk_velocity(self):
         r"""Returns the mass-weighted average velocity in cm/s.
@@ -213,7 +213,7 @@
         """
         if self.bulk_vel is not None:
             return self.bulk_vel
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
@@ -234,7 +234,7 @@
         if self.rms_vel is not None:
             return self.rms_vel
         bv = self.bulk_velocity()
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         sm = pm.sum()
         vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
@@ -331,7 +331,7 @@
         handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
-                   + ["particle_index"] + ["ParticleMassMsun"]:
+                   + ["particle_index"] + ["particle_mass"]:
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
         if 'creation_time' in self.data.pf.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
@@ -464,7 +464,7 @@
         if self["particle_position_x"].size > 1:
             for index in np.unique(inds):
                 self.mass_bins[index] += \
-                np.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["particle_mass"][inds == index]).in_units('Msun')
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -750,7 +750,7 @@
             inds = np.digitize(dist, self.radial_bins) - 1
             for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    np.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["particle_mass"][inds == index]).in_units('Msun')
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -1356,7 +1356,7 @@
     _name = "HOP"
     _halo_class = HOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
-              ["ParticleMassMsun"]
+              ["particle_mass"]
 
     def __init__(self, data_source, threshold=160.0, dm_only=True):
         self.threshold = threshold
@@ -1368,7 +1368,7 @@
             RunHOP(self.particle_fields["particle_position_x"] / self.period[0],
                 self.particle_fields["particle_position_y"] / self.period[1],
                 self.particle_fields["particle_position_z"] / self.period[2],
-                self.particle_fields["ParticleMassMsun"],
+                self.particle_fields["particle_mass"].in_units('Msun'),
                 self.threshold)
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
@@ -1555,7 +1555,7 @@
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
-              ["ParticleMassMsun", "particle_index"]
+              ["particle_mass", "particle_index"]
 
     def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
         period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
@@ -1589,8 +1589,8 @@
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
-            self.particle_fields['ParticleMassMsun'])
+        np.divide(self.particle_fields['particle_mass'].in_units('Msun'), self.total_mass,
+            self.particle_fields['particle_mass'])
         np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
         np.divide(self.particle_fields["particle_position_y"],
@@ -2190,7 +2190,7 @@
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
         if total_mass is None:
-            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
+            total_mass = self.comm.mpi_allreduce((self._data_source["particle_mass"].in_units('Msun').astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
             self.padding = (np.zeros(3, dtype='float64'),
@@ -2386,9 +2386,9 @@
             if dm_only:
                 select = self._get_dm_indices()
                 total_mass = \
-                    self.comm.mpi_allreduce((self._data_source['all', "ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                    self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2409,10 +2409,10 @@
             sub_mass = total_mass
         elif dm_only:
             select = self._get_dm_indices()
-            sub_mass = self._data_source["ParticleMassMsun"][select].sum(dtype='float64')
+            sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0]
+                self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)
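
The replacement of the derived "ParticleMassMsun" field with unit-aware
`particle_mass` access follows a single pattern throughout this file; a
minimal sketch (dataset path reused from the notebooks in this changeset):

from yt.mods import *

ds = load('Enzo_64/DD0043/data0043')
dd = ds.h.all_data()

pm = dd['particle_mass']         # a YTArray that knows its own units
print pm.in_units('Msun').sum()  # replaces the old "ParticleMassMsun" lookup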

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -11,234 +11,232 @@
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
-	struct particle:
-		np.int64_t id
-		float pos[6]
+    struct particle:
+        np.int64_t id
+        float pos[6]
 
 cdef import from "fof.h":
-	struct fof:
-		np.int64_t num_p
-		particle *particles
+    struct fof:
+        np.int64_t num_p
+        particle *particles
 
 cdef import from "halo.h":
-	struct halo:
-		np.int64_t id
-		float pos[6], corevel[3], bulkvel[3]
-		float m, r, child_r, vmax_r, mgrav,	vmax, rvmax, rs, klypin_rs, vrms
-		float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
-		float bullock_spin, kin_to_pot
-		np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-		float min_pos_err, min_vel_err, min_bulkvel_err
+    struct halo:
+        np.int64_t id
+        float pos[6], corevel[3], bulkvel[3]
+        float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+        float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+        float bullock_spin, kin_to_pot
+        np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+        float min_pos_err, min_vel_err, min_bulkvel_err
 
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
 
 cdef import from "groupies.h":
-	void find_subs(fof *f) 
-	halo *halos
-	np.int64_t num_halos
-	void calc_mass_definition()
+    void find_subs(fof *f) nogil
+    halo *halos
+    np.int64_t num_halos
+    void calc_mass_definition() nogil
 
 # For outputing halos, rockstar style
 
 cdef import from "meta_io.h":
-	void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) 
+    void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) nogil
 
 # For setting up the configuration of rockstar
 
 cdef import from "config.h":
-	void setup_config()
+    void setup_config() nogil
 
 cdef import from "config_vars.h":
-	# Rockstar cleverly puts all of the config variables inside a templated
-	# definition of their vaiables.
-	char *FILE_FORMAT
-	np.float64_t PARTICLE_MASS
+    # Rockstar cleverly puts all of the config variables inside a templated
+    # definition of their variables.
+    char *FILE_FORMAT
+    np.float64_t PARTICLE_MASS
 
-	char *MASS_DEFINITION
-	np.int64_t MIN_HALO_OUTPUT_SIZE
-	np.float64_t FORCE_RES
+    char *MASS_DEFINITION
+    np.int64_t MIN_HALO_OUTPUT_SIZE
+    np.float64_t FORCE_RES
 
-	np.float64_t SCALE_NOW
-	np.float64_t h0
-	np.float64_t Ol
-	np.float64_t Om
+    np.float64_t SCALE_NOW
+    np.float64_t h0
+    np.float64_t Ol
+    np.float64_t Om
 
-	np.int64_t GADGET_ID_BYTES
-	np.float64_t GADGET_MASS_CONVERSION
-	np.float64_t GADGET_LENGTH_CONVERSION
-	np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
-	np.int64_t RESCALE_PARTICLE_MASS
+    np.int64_t GADGET_ID_BYTES
+    np.float64_t GADGET_MASS_CONVERSION
+    np.float64_t GADGET_LENGTH_CONVERSION
+    np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
+    np.int64_t RESCALE_PARTICLE_MASS
 
-	np.int64_t PARALLEL_IO
-	char *PARALLEL_IO_SERVER_ADDRESS
-	char *PARALLEL_IO_SERVER_PORT
-	np.int64_t PARALLEL_IO_WRITER_PORT
-	char *PARALLEL_IO_SERVER_INTERFACE
-	char *RUN_ON_SUCCESS
+    np.int64_t PARALLEL_IO
+    char *PARALLEL_IO_SERVER_ADDRESS
+    char *PARALLEL_IO_SERVER_PORT
+    np.int64_t PARALLEL_IO_WRITER_PORT
+    char *PARALLEL_IO_SERVER_INTERFACE
+    char *RUN_ON_SUCCESS
 
-	char *INBASE
-	char *FILENAME
-	np.int64_t STARTING_SNAP
-	np.int64_t NUM_SNAPS
-	np.int64_t NUM_BLOCKS
-	np.int64_t NUM_READERS
-	np.int64_t PRELOAD_PARTICLES
-	char *SNAPSHOT_NAMES
-	char *LIGHTCONE_ALT_SNAPS
-	char *BLOCK_NAMES
+    char *INBASE
+    char *FILENAME
+    np.int64_t STARTING_SNAP
+    np.int64_t NUM_SNAPS
+    np.int64_t NUM_BLOCKS
+    np.int64_t NUM_READERS
+    np.int64_t PRELOAD_PARTICLES
+    char *SNAPSHOT_NAMES
+    char *LIGHTCONE_ALT_SNAPS
+    char *BLOCK_NAMES
 
-	char *OUTBASE
-	np.float64_t OVERLAP_LENGTH
-	np.int64_t NUM_WRITERS
-	np.int64_t FORK_READERS_FROM_WRITERS
-	np.int64_t FORK_PROCESSORS_PER_MACHINE
+    char *OUTBASE
+    np.float64_t OVERLAP_LENGTH
+    np.int64_t NUM_WRITERS
+    np.int64_t FORK_READERS_FROM_WRITERS
+    np.int64_t FORK_PROCESSORS_PER_MACHINE
 
-	char *OUTPUT_FORMAT
-	np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
-	np.int64_t FULL_PARTICLE_CHUNKS
-	char *BGC2_SNAPNAMES
+    char *OUTPUT_FORMAT
+    np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
+    np.int64_t FULL_PARTICLE_CHUNKS
+    char *BGC2_SNAPNAMES
 
-	np.int64_t BOUND_PROPS
-	np.int64_t BOUND_OUT_TO_HALO_EDGE
-	np.int64_t DO_MERGER_TREE_ONLY
-	np.int64_t IGNORE_PARTICLE_IDS
-	np.float64_t TRIM_OVERLAP
-	np.float64_t ROUND_AFTER_TRIM
-	np.int64_t LIGHTCONE
-	np.int64_t PERIODIC
+    np.int64_t BOUND_PROPS
+    np.int64_t BOUND_OUT_TO_HALO_EDGE
+    np.int64_t DO_MERGER_TREE_ONLY
+    np.int64_t IGNORE_PARTICLE_IDS
+    np.float64_t TRIM_OVERLAP
+    np.float64_t ROUND_AFTER_TRIM
+    np.int64_t LIGHTCONE
+    np.int64_t PERIODIC
 
-	np.float64_t LIGHTCONE_ORIGIN[3]
-	np.float64_t LIGHTCONE_ALT_ORIGIN[3]
+    np.float64_t LIGHTCONE_ORIGIN[3]
+    np.float64_t LIGHTCONE_ALT_ORIGIN[3]
 
-	np.float64_t LIMIT_CENTER[3]
-	np.float64_t LIMIT_RADIUS
+    np.float64_t LIMIT_CENTER[3]
+    np.float64_t LIMIT_RADIUS
 
-	np.int64_t SWAP_ENDIANNESS
-	np.int64_t GADGET_VARIANT
+    np.int64_t SWAP_ENDIANNESS
+    np.int64_t GADGET_VARIANT
 
-	np.float64_t FOF_FRACTION
-	np.float64_t FOF_LINKING_LENGTH
-	np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
-	np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
-	np.int64_t TEMPORAL_HALO_FINDING
-	np.int64_t MIN_HALO_PARTICLES
-	np.float64_t UNBOUND_THRESHOLD
-	np.int64_t ALT_NFW_METRIC
+    np.float64_t FOF_FRACTION
+    np.float64_t FOF_LINKING_LENGTH
+    np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
+    np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
+    np.int64_t TEMPORAL_HALO_FINDING
+    np.int64_t MIN_HALO_PARTICLES
+    np.float64_t UNBOUND_THRESHOLD
+    np.int64_t ALT_NFW_METRIC
 
-	np.int64_t TOTAL_PARTICLES
-	np.float64_t BOX_SIZE
-	np.int64_t OUTPUT_HMAD
-	np.int64_t OUTPUT_PARTICLES
-	np.int64_t OUTPUT_LEVELS
-	np.float64_t DUMP_PARTICLES[3]
+    np.int64_t TOTAL_PARTICLES
+    np.float64_t BOX_SIZE
+    np.int64_t OUTPUT_HMAD
+    np.int64_t OUTPUT_PARTICLES
+    np.int64_t OUTPUT_LEVELS
+    np.float64_t DUMP_PARTICLES[3]
 
-	np.float64_t AVG_PARTICLE_SPACING
-	np.int64_t SINGLE_SNAP
+    np.float64_t AVG_PARTICLE_SPACING
+    np.int64_t SINGLE_SNAP
 
 
 
 cdef class RockstarGroupiesInterface:
-	
-	cdef public object pf
-	cdef public object fof
+    
+    cdef public object pf
+    cdef public object fof
 
-	# For future use/consistency
-	def __cinit__(self,pf):
-		self.pf = pf
+    # For future use/consistency
+    def __cinit__(self,pf):
+        self.pf = pf
 
-	def setup_rockstar(self,
-						particle_mass,
-						int periodic = 1, force_res=None,
-						int min_halo_size = 25, outbase = "None",
-						callbacks = None):
-		global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
-		global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
-		global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-		global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-		global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
-		
+    def setup_rockstar(self,
+                        particle_mass,
+                        int periodic = 1, force_res=None,
+                        int min_halo_size = 25, outbase = "None",
+                        callbacks = None):
+        global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
+        global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
+        global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
+        global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        
 
-		if force_res is not None:
-			FORCE_RES=np.float64(force_res)
+        if force_res is not None:
+            FORCE_RES=np.float64(force_res)
 
-		OVERLAP_LENGTH = 0.0
-		
-		FILENAME = "inline.<block>"
-		FILE_FORMAT = "GENERIC"
-		OUTPUT_FORMAT = "ASCII"
-		MIN_HALO_OUTPUT_SIZE=min_halo_size
-		
-		pf = self.pf
+        OVERLAP_LENGTH = 0.0
+        
+        FILENAME = "inline.<block>"
+        FILE_FORMAT = "GENERIC"
+        OUTPUT_FORMAT = "ASCII"
+        MIN_HALO_OUTPUT_SIZE=min_halo_size
+        
+        pf = self.pf
 
-		h0 = pf.hubble_constant
-		Ol = pf.omega_lambda
-		Om = pf.omega_matter
-		
-		SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-		
-		if not outbase =='None'.decode('UTF-8'):
-			#output directory. since we can't change the output filenames
-			#workaround is to make a new directory
-			OUTBASE = outbase 
+        h0 = pf.hubble_constant
+        Ol = pf.omega_lambda
+        Om = pf.omega_matter
+        
+        SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+        
+        if outbase != 'None':
+            # Output directory: since we can't change rockstar's output
+            # filenames, the workaround is to write into a new directory.
+            OUTBASE = outbase
 
 
-		PARTICLE_MASS = particle_mass.in_units('Msun/h')
-		PERIODIC = periodic
-		BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
+        PARTICLE_MASS = particle_mass.in_units('Msun/h')
+        PERIODIC = periodic
+        BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
 
-		# Set up the configuration options
-		setup_config()
+        # Set up the configuration options
+        setup_config()
 
-		# Needs to be called so rockstar can use the particle mass parameter
-		# to calculate virial quantities properly
-		calc_mass_definition()
+        # Needs to be called so rockstar can use the particle mass parameter
+        # to calculate virial quantities properly
+        calc_mass_definition()
 
+    def output_halos(self):
+        output_halos(0, 0, 0, NULL) 
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pid,
+                                np.ndarray[np.float64_t, ndim=2] pos,
+                                np.ndarray[np.float64_t, ndim=2] vel,
+                                np.ndarray[np.int64_t, ndim=1] fof_tags,
+                                np.int64_t nfof,
+                                np.int64_t npart_max):
 
-	def make_rockstar_fof(self,fof_ids, pos, vel):
+        # Define fof object
 
-		# Turn positions and velocities into units we want
-		pos = pos.in_units('Mpccm/h')
-		vel = vel.in_units('km/s')
+        # Find number of particles
+        cdef np.int64_t i, j
+        cdef np.int64_t num_particles = pid.shape[0]
 
-		# Define fof object
-		cdef fof fof_obj
+        # Allocate space for correct number of particles
+        cdef particle* particles = <particle*> malloc(npart_max * sizeof(particle))
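+        # NOTE: this buffer holds one FOF group at a time, so npart_max
+        # must be at least the size of the largest group or the writes
+        # below will overrun it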
+        cdef fof fof_obj
+        fof_obj.particles = particles
 
-		# Find number of particles
-		cdef np.int64_t num_particles = len(fof_ids)
+        cdef np.int64_t last_fof_tag = 1
+        cdef np.int64_t k = 0
+        for i in range(num_particles):
+            if fof_tags[i] == 0:
+                continue
+            if fof_tags[i] != last_fof_tag:
+                last_fof_tag = fof_tags[i]
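+                # a new group begins: hand the finished batch to rockstar;
+                # batches of 16 or fewer particles are silently dropped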
+                if k > 16:
+                    print "Finding subs", k, i
+                    fof_obj.num_p = k
+                    find_subs(&fof_obj)
+                k = 0
+            particles[k].id = pid[i]
 
-		# Allocate space for correct number of particles
-		cdef particle* particles = <particle*> malloc(num_particles * sizeof(particle))
+            # fill in locations & velocities
+            for j in range(3):
+                particles[k].pos[j] = pos[i,j]
+                particles[k].pos[j+3] = vel[i,j]
+            k += 1
+        # flush the final group, which the tag-change test above never reaches
+        if k > 16:
+            fof_obj.num_p = k
+            find_subs(&fof_obj)
+        free(particles)
 
-		# Fill in array of particles with particle that fof identified
-		# This is possibly the slowest way to code this, but for now
-		# I just want it to work
-		for i,id in enumerate(fof_ids):
-			particles[i].id = id
 
-			# fill in locations & velocities
-			for j in range(3):
-				particles[i].pos[j] = pos[id][j]
-				particles[i].pos[j+3] = vel[id][j]
 
-
-		# Assign pointer to particles into FOF object 
-		fof_obj.particles = particles
-
-		# Assign number of particles into FOF object
-		fof_obj.num_p = num_particles
-
-		# Make pointer to fof object
-		cdef fof* fof_pointer = & fof_obj
-
-		# Find the sub halos using rockstar by passing a pointer to the fof object
-		find_subs( fof_pointer)
-
-		# Output the halos, rockstar style
-		output_halos(0, 0, 0, NULL) 
-
-		free(particles)
-
-
-

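The rewritten make_rockstar_fof also changes the calling convention: the
old version accepted yt arrays and converted them with in_units itself,
while the new one takes raw int64/float64 ndarrays, so the caller must
convert positions to Mpccm/h and velocities to km/s beforehand and sort
the particles so that equal FOF tags are contiguous. A minimal sketch of
driving the new interface, assuming the rockstar_groupies extension from
the setup.py change just below has been built (the dataset path and the
synthetic catalog here are hypothetical stand-ins, not part of this
commit):

    import numpy as np
    from yt.mods import load
    from yt.units.yt_array import YTQuantity
    from yt.analysis_modules.halo_finding.rockstar.rockstar_groupies \
        import RockstarGroupiesInterface

    pf = load("my_snapshot")  # hypothetical dataset path
    rgi = RockstarGroupiesInterface(pf)
    rgi.setup_rockstar(YTQuantity(1.0e8, "Msun/h"))  # per-particle mass

    # stand-in catalog: 32 contiguous FOF groups of synthetic particles
    N = 1024
    rng = np.random.RandomState(0)
    pid = np.arange(N, dtype=np.int64)
    pos = rng.uniform(0.0, 25.0, (N, 3))            # Mpccm/h
    vel = rng.normal(0.0, 100.0, (N, 3))            # km/s
    fof_tags = np.sort(rng.randint(1, 33, N).astype(np.int64))

    rgi.make_rockstar_fof(pid, pos, vel, fof_tags, 32, N)
    rgi.output_halos()  # writes halos, rockstar style
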
diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -24,5 +24,12 @@
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
+    config.add_extension("rockstar_groupies",
+                         "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
+                         library_dirs=[rd],
+                         libraries=["rockstar"],
+                         include_dirs=[rd,
+                                       os.path.join(rd, "io"),
+                                       os.path.join(rd, "util")])
     return config
 

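The new rockstar_groupies extension reuses the exact recipe of the
existing rockstar_interface extension above it (same library_dirs,
libraries, and include_dirs), and, as the next hunk shows, it is only
built when a rockstar.cfg pointing at a compiled librockstar exists. A
quick smoke test once it compiles might be no more than an import
(assuming librockstar is on the runtime linker path, e.g. via
LD_LIBRARY_PATH):

    # hedged smoke test for the freshly built extension
    from yt.analysis_modules.halo_finding.rockstar import rockstar_groupies
    print rockstar_groupies.RockstarGroupiesInterface
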
diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
 import os.path
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_finding', parent_package, top_path)
@@ -12,6 +10,5 @@
     config.add_subpackage("parallel_hop")
     if os.path.exists("rockstar.cfg"):
         config.add_subpackage("rockstar")
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
+    config.make_config_py()  # installs __config__.py
     return config

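Incidentally, the make_config_py() call that survives this cleanup has
numpy.distutils generate a __config__.py inside the installed package;
if memory serves, the generated module carries a show() helper, so the
recorded build configuration can be inspected after installation:

    # a sketch, assuming numpy.distutils' standard generated __config__
    from yt.analysis_modules.halo_finding import __config__
    __config__.show()
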
diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 6fa87627d9beefbe93bc2a1952a99e64c1f30cde yt/analysis_modules/hierarchy_subset/api.py
--- a/yt/analysis_modules/hierarchy_subset/api.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-API for hierarchy_subset
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .hierarchy_subset import \
-    ConstructedRootGrid, \
-    AMRExtractedGridProxy, \
-    ExtractedHierarchy, \
-    ExtractedParameterFile

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/71196d4289bf/
Changeset:   71196d4289bf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-20 02:14:16
Summary:     Merging to fix confusing problem.
Affected #:  1 file

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


