[yt-svn] commit/yt: 29 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Apr 14 09:49:31 PDT 2016


29 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/cb86fb5fa4e2/
Changeset:   cb86fb5fa4e2
Branch:      yt
User:        MatthewTurk
Date:        2016-01-21 19:02:45+00:00
Summary:     Adding three new colormaps, names to be changed later.
Affected #:  1 file

diff -r db80e4d8d927d3fc117fe270a91e1cbf4a8b01a8 -r cb86fb5fa4e2376621b0c03cdf930fa2071c446d yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7816,6 +7816,422 @@
 np.ones(256),
 )
 
+_parameters = {'xp': [-6.0027356902356814, -42.46106902356901,
+                      41.393097643097661, 69.344486531986547, 6.15004208754209,
+                      17.695180976430976],
+               'yp': [-19.704861111111086, 56.857638888888886,
+                       -8.1597222222222001, 58.680555555555543, -23.958333333333314,
+                      -16.059027777777771],
+               'min_Jp': 17.1875,
+               'max_Jp': 82.1875}
+color_map_luts["cm_candidate_mjt"] = (array(
+  [ 0.01845663, 0.01940818, 0.02066025, 0.02218966, 0.02395409, 0.02595033,
+    0.02817596, 0.03060653, 0.03322304, 0.03602798, 0.03900455, 0.04208415,
+    0.04516324, 0.04823603, 0.05128648, 0.05431253, 0.05730541, 0.06025524,
+    0.0631607 , 0.06601581, 0.0688137 , 0.07155484, 0.07423302, 0.07684491,
+    0.07939306, 0.08186684, 0.08427203, 0.08660745, 0.08886448, 0.09105658,
+    0.09316971, 0.09521672, 0.09719719, 0.09910774, 0.10096841, 0.10275846,
+    0.10451309, 0.10621217, 0.10788683, 0.10952759, 0.1111585 , 0.11277895,
+    0.11440998, 0.11605498, 0.11773643, 0.11945691, 0.12124361, 0.1230952,
+    0.12504265, 0.12708539, 0.12924907, 0.13154308, 0.13398218, 0.13657917,
+    0.13934934, 0.14230244, 0.14544595, 0.14880137, 0.15236868, 0.15615269,
+    0.16016659, 0.16442043, 0.16890677, 0.1736277 , 0.17858407, 0.18378678,
+    0.18922358, 0.19488582, 0.20076673, 0.2068576 , 0.21314778, 0.21962487,
+    0.22627485, 0.23308241, 0.24003134, 0.2471049 , 0.25428629, 0.26155934,
+    0.26890755, 0.27631616, 0.28377157, 0.29126147, 0.29877494, 0.30630245,
+    0.31383585, 0.3213683 , 0.32889415, 0.33640883, 0.34390875, 0.35139113,
+    0.35885392, 0.36629566, 0.37371536, 0.38111248, 0.38848676, 0.39583821,
+    0.40316703, 0.41047357, 0.41775516, 0.42501568, 0.4322558 , 0.43947622,
+    0.44667761, 0.4538607 , 0.46102616, 0.4681747 , 0.4753059 , 0.48242091,
+    0.48952159, 0.49660864, 0.50368272, 0.5107445 , 0.51779465, 0.52483372,
+    0.53186264, 0.53888257, 0.54589416, 0.55289811, 0.55989507, 0.56688575,
+    0.57387113, 0.58085317, 0.58783159, 0.59480708, 0.60178032, 0.60875202,
+    0.61572288, 0.62269359, 0.6296661 , 0.63664175, 0.64361962, 0.65060034,
+    0.65758453, 0.66457277, 0.67156565, 0.67856369, 0.68556743, 0.69257733,
+    0.69959381, 0.70661727, 0.71364828, 0.72068785, 0.7277347 , 0.73478882,
+    0.74185008, 0.74891821, 0.7559928 , 0.76307522, 0.77016399, 0.77725777,
+    0.78435528, 0.79145495, 0.7985548 , 0.8056524 , 0.81274479, 0.8198318,
+    0.82691566, 0.83398271, 0.84102675, 0.84805028, 0.85504479, 0.8619907,
+    0.86889642, 0.87572945, 0.88249039, 0.88914374, 0.89568162, 0.90206422,
+    0.90825663, 0.91421853, 0.91990203, 0.92525379, 0.93021961, 0.93475119,
+    0.93881359, 0.94239114, 0.94548927, 0.94813199, 0.950356  , 0.95220389,
+    0.95371848, 0.95494533, 0.955918  , 0.95666379, 0.95722053, 0.95760023,
+    0.95783176, 0.95791958, 0.95789564, 0.9577547 , 0.95751076, 0.95718454,
+    0.95677095, 0.95627776, 0.95571186, 0.95508526, 0.95439943, 0.95365604,
+    0.95285946, 0.9520137 , 0.9511224 , 0.95018892, 0.94921639, 0.94820771,
+    0.94716564, 0.94609279, 0.94499169, 0.94386477, 0.9427144 , 0.94154291,
+    0.94035262, 0.9391458 , 0.93792477, 0.93669181, 0.93544924, 0.9341994,
+    0.93294465, 0.93168737, 0.93042998, 0.92917492, 0.92792467, 0.9266817,
+    0.92544809, 0.92422626, 0.92301878, 0.9218282 , 0.92065707, 0.91950796,
+    0.91838341, 0.91728597, 0.91621816, 0.91518248, 0.91418144, 0.9132175,
+    0.91229316, 0.91141094, 0.91057341, 0.90978329, 0.90904349, 0.90835722,
+    0.90772821, 0.90716087, 0.9066524 , 0.90620748, 0.90584124, 0.90556585,
+    0.90536904, 0.90529003, 0.90533583, 0.90556318, 0.90603649, 0.90695623,
+    0.90913313, 0.91657895, 0.92518702, 0.93347579]), array(
+  [ 0.14549808, 0.14959758, 0.15353745, 0.15733732, 0.1610376 , 0.16463933,
+    0.16813881, 0.17155785, 0.17491483, 0.17819541, 0.18141182, 0.18458839,
+    0.18770724, 0.19077643, 0.19381948, 0.19681897, 0.19978209, 0.20272758,
+    0.20564068, 0.20852987, 0.21140659, 0.21425995, 0.21710135, 0.21993298,
+    0.22274859, 0.22556395, 0.22836995, 0.23116975, 0.23397288, 0.23676848,
+    0.23957041, 0.24237069, 0.2451726 , 0.24798083, 0.25078633, 0.2536041,
+    0.25641791, 0.25924206, 0.26206407, 0.2648926 , 0.2677197 , 0.27054988,
+    0.27337716, 0.27620452, 0.27902542, 0.28184388, 0.28465047, 0.28745244,
+    0.29023681, 0.29301229, 0.2957673 , 0.2985035 , 0.30121847, 0.30390569,
+    0.30656793, 0.30919798, 0.31179276, 0.31435225, 0.3168706 , 0.31934487,
+    0.32177243, 0.32414968, 0.32647317, 0.32873992, 0.3309469 , 0.33309004,
+    0.33516695, 0.33717534, 0.33911292, 0.34097773, 0.34276826, 0.34448346,
+    0.34612285, 0.34768649, 0.34917502, 0.35058959, 0.35193184, 0.35320373,
+    0.35440776, 0.3555465 , 0.35662265, 0.35763896, 0.35859819, 0.35950301,
+    0.36035598, 0.36115947, 0.36191571, 0.36262669, 0.36329423, 0.36391995,
+    0.36450527, 0.36505146, 0.36555959, 0.36603063, 0.36646538, 0.36686454,
+    0.36722872, 0.36755843, 0.36785545, 0.36811875, 0.36834862, 0.36854528,
+    0.3687089 , 0.36883963, 0.36893758, 0.36900282, 0.36903596, 0.36903681,
+    0.36900473, 0.36893965, 0.36884148, 0.36871009, 0.36854532, 0.36834702,
+    0.36811484, 0.3678482 , 0.36754678, 0.36721023, 0.36683815, 0.36643008,
+    0.36598535, 0.36550255, 0.36498174, 0.36442226, 0.36382344, 0.36318451,
+    0.3625047 , 0.36178315, 0.36101802, 0.36020779, 0.35935264, 0.35845149,
+    0.35750326, 0.35650677, 0.35546082, 0.35436416, 0.35321545, 0.35201335,
+    0.35075644, 0.34944329, 0.34807215, 0.3466408 , 0.34514904, 0.34359543,
+    0.34197858, 0.34029709, 0.33854968, 0.33673306, 0.33484673, 0.33289005,
+    0.33086222, 0.3287627 , 0.32659138, 0.3243486 , 0.32203538, 0.31964923,
+    0.31718431, 0.31465481, 0.31206608, 0.3094119 , 0.3067005 , 0.30395628,
+    0.30116449, 0.29836962, 0.29556856, 0.29281389, 0.29011662, 0.28754017,
+    0.28514306, 0.28299549, 0.28118369, 0.27980714, 0.27897084, 0.27877267,
+    0.27928815, 0.28055737, 0.28257918, 0.28531437, 0.2886962 , 0.29264343,
+    0.29707203, 0.30189644, 0.30704703, 0.31246579, 0.318088  , 0.32388104,
+    0.32979805, 0.33582149, 0.34190762, 0.34805406, 0.35424005, 0.36044006,
+    0.36665573, 0.37287672, 0.37909448, 0.38529658, 0.39148116, 0.39764648,
+    0.40378866, 0.40990456, 0.41599162, 0.42204777, 0.42807135, 0.43406103,
+    0.44001575, 0.44593465, 0.45181705, 0.45766241, 0.46347028, 0.46924029,
+    0.47497214, 0.48066556, 0.4863203 , 0.49193612, 0.49751281, 0.50305013,
+    0.50854783, 0.51400566, 0.51942335, 0.52480059, 0.53013708, 0.53543248,
+    0.54068668, 0.54589935, 0.55107004, 0.55619831, 0.56128369, 0.56632567,
+    0.57132372, 0.5762773 , 0.58118582, 0.58604867, 0.5908652 , 0.59563471,
+    0.60035645, 0.60502957, 0.60965313, 0.61422605, 0.61874705, 0.62321462,
+    0.62762687, 0.63198146, 0.63627878, 0.64051559, 0.64468414, 0.64877752,
+    0.6527965 , 0.65672131, 0.66054037, 0.66421949, 0.66770704, 0.67086745,
+    0.67317041, 0.67282641, 0.67266658, 0.67286793]), array(
+  [ 0.31784919, 0.31399639, 0.31040105, 0.30704607, 0.30380333, 0.30070875,
+    0.29782465, 0.29508233, 0.29241622, 0.28993656, 0.28760832, 0.28531346,
+    0.28318494, 0.28120084, 0.27922989, 0.27740774, 0.27570877, 0.27402022,
+    0.27246419, 0.27099788, 0.26955239, 0.26822345, 0.26693985, 0.26569724,
+    0.2645547 , 0.26340288, 0.26231968, 0.26128857, 0.26024441, 0.2592725,
+    0.25827349, 0.25730732, 0.25634957, 0.25536473, 0.25441368, 0.25338488,
+    0.25238608, 0.25130798, 0.25022886, 0.24907581, 0.2478929 , 0.24663333,
+    0.24532558, 0.24393036, 0.24247998, 0.24092303, 0.23931621, 0.23757501,
+    0.23579155, 0.23385836, 0.23186082, 0.22975416, 0.22753042, 0.22523787,
+    0.22279557, 0.22026374, 0.21765214, 0.2148911 , 0.21203983, 0.20910422,
+    0.20605986, 0.20289957, 0.19965924, 0.19633992, 0.19294399, 0.18945376,
+    0.18589623, 0.18228559, 0.17863001, 0.17493902, 0.17122338, 0.16749485,
+    0.16376586, 0.16004918, 0.15635755, 0.15270334, 0.14909821, 0.1455525,
+    0.14207653, 0.13867843, 0.13536508, 0.13214209, 0.12901386, 0.12598366,
+    0.12305378, 0.12022564, 0.11749993, 0.11487682, 0.11235599, 0.10993683,
+    0.10761855, 0.10540021, 0.10328085, 0.10125952, 0.09933535, 0.09750756,
+    0.09577548, 0.09413856, 0.09259879, 0.09115321, 0.08980156, 0.08854366,
+    0.08737943, 0.0863088 , 0.08533173, 0.08444816, 0.08365874, 0.08296295,
+    0.08235974, 0.08184879, 0.08142963, 0.0811017 , 0.08086426, 0.08071652,
+    0.08065732, 0.08068528, 0.08079915, 0.08099759, 0.08127912, 0.08164221,
+    0.08208509, 0.08260558, 0.08320245, 0.08387405, 0.08461872, 0.08543485,
+    0.08632086, 0.08727529, 0.08829637, 0.08938269, 0.09053368, 0.09174839,
+    0.09302606, 0.09436609, 0.09576809, 0.0972319 , 0.09875759, 0.10034547,
+    0.10199611, 0.10371036, 0.10548936, 0.10733453, 0.10924784, 0.11123145,
+    0.1132879 , 0.11542014, 0.11763156, 0.1199261 , 0.12230825, 0.12478303,
+    0.12735611, 0.13003383, 0.13282332, 0.1357325 , 0.13877022, 0.14194733,
+    0.14527745, 0.14877095, 0.15244171, 0.15631008, 0.16039526, 0.16471264,
+    0.16929903, 0.17416921, 0.17936813, 0.18491671, 0.19087146, 0.19726367,
+    0.20413699, 0.21153645, 0.2195002 , 0.22805202, 0.23719324, 0.24689635,
+    0.25710328, 0.26773032, 0.27867898, 0.2898494 , 0.30115214, 0.31251542,
+    0.3238872 , 0.33520885, 0.34646952, 0.35766645, 0.36875439, 0.37976247,
+    0.39066078, 0.40148085, 0.41217762, 0.42279543, 0.4333259 , 0.44374506,
+    0.45408352, 0.46434054, 0.47451587, 0.48459837, 0.49459666, 0.50451736,
+    0.51436098, 0.5241281 , 0.53381937, 0.54343549, 0.55297718, 0.56244513,
+    0.57184006, 0.58116264, 0.59041354, 0.59959339, 0.60870282, 0.61774243,
+    0.62671284, 0.63561464, 0.64444846, 0.65321495, 0.6619148 , 0.67054874,
+    0.67911758, 0.68762222, 0.69606366, 0.70444301, 0.71276152, 0.72102061,
+    0.7292224 , 0.73736877, 0.74546167, 0.75350327, 0.76149601, 0.76944257,
+    0.7773459 , 0.78520926, 0.79303621, 0.80083062, 0.80859672, 0.81633909,
+    0.82406267, 0.83177277, 0.83947509, 0.84717571, 0.85488105, 0.86259792,
+    0.87033341, 0.87809481, 0.88590073, 0.89376265, 0.90168051, 0.90966175,
+    0.9177585 , 0.92595735, 0.93431661, 0.94285311, 0.95166927, 0.96090167,
+    0.97095595, 0.97849108, 0.98057884, 0.98147471]), np.ones(256))
+
+# Used to reconstruct the colormap in viscm
+parameters = {'xp': [17.623025510286254, 20.414094090828513,
+                    -82.390265292478205, -3.3099888437807294, -5.170701230808902],
+              'yp': [12.406964380648589, -98.305422647527877, 52.412280701754383,
+                     34.735513024986687, 22.175704412546509],
+              'min_Jp': 13.5507921715,
+              'max_Jp': 93.8863000932}
+
+
+color_map_luts["cm_candidate_ng"] = (array(
+  [ 0.22330277, 0.22677033, 0.23017935, 0.23353169, 0.23681402, 0.2400368,
+    0.24320742, 0.24631505, 0.24936304, 0.25236366, 0.25530723, 0.25819299,
+    0.2610367 , 0.26382794, 0.26656596, 0.26926798, 0.2719204 , 0.27452761,
+    0.27710562, 0.27963477, 0.28213047, 0.28460423, 0.28702736, 0.28943233,
+    0.29181274, 0.29415763, 0.29649262, 0.29879558, 0.30108328, 0.30335993,
+    0.30560745, 0.30785443, 0.31007522, 0.31229044, 0.31449347, 0.31668024,
+    0.3188655 , 0.32102524, 0.32318876, 0.32532296, 0.32745808, 0.32956694,
+    0.33167014, 0.33374679, 0.33581262, 0.33784825, 0.33986937, 0.34185319,
+    0.34382043, 0.34573964, 0.34763692, 0.34948172, 0.35129044, 0.35304952,
+    0.35475122, 0.35640626, 0.3579826 , 0.35949714, 0.36094336, 0.36229195,
+    0.3635597 , 0.36473836, 0.36579828, 0.366748  , 0.36758102, 0.36828691,
+    0.36883982, 0.3692409 , 0.36948262, 0.36955344, 0.36944189, 0.36913688,
+    0.36862806, 0.36790621, 0.36696373, 0.36579487, 0.36439699, 0.36276941,
+    0.3609152 , 0.35884058, 0.35655508, 0.35407138, 0.35140496, 0.34857371,
+    0.34559732, 0.34249671, 0.33929342, 0.33600904, 0.3326647 , 0.32928363,
+    0.32588267, 0.3224787 , 0.31908743, 0.31572297, 0.31239786, 0.30912306,
+    0.30590808, 0.30276102, 0.2996887 , 0.2966967 , 0.29378953, 0.29097065,
+    0.2882426 , 0.28560705, 0.28306489, 0.28061848, 0.27826751, 0.27600835,
+    0.27383916, 0.27175753, 0.26976059, 0.26784495, 0.2660068 , 0.26424192,
+    0.26254879, 0.26092124, 0.2593516 , 0.25783427, 0.25636334, 0.25493271,
+    0.25353648, 0.25217148, 0.25082665, 0.2494952 , 0.24817023, 0.24684479,
+    0.24551313, 0.24416844, 0.2428016 , 0.24140563, 0.23997368, 0.23849902,
+    0.23697621, 0.2353966 , 0.23375381, 0.23204161, 0.23025401, 0.22838496,
+    0.22642823, 0.22437847, 0.22223061, 0.21997986, 0.21762015, 0.21514715,
+    0.21255756, 0.20984779, 0.20701463, 0.20405362, 0.20095999, 0.19773434,
+    0.19437503, 0.1908809 , 0.18725135, 0.1834808 , 0.17957218, 0.17552885,
+    0.17135306, 0.16704809, 0.1626184 , 0.15806468, 0.15339359, 0.14861956,
+    0.14375457, 0.13881352, 0.13381484, 0.12878121, 0.12374048, 0.11871631,
+    0.11375875, 0.10891932, 0.10425825, 0.09984694, 0.09576875, 0.09211897,
+    0.08900344, 0.08653544, 0.08483018, 0.08399726, 0.08413159, 0.08530458,
+    0.08755744, 0.09089837, 0.09530427, 0.10072634, 0.1070978 , 0.11434195,
+    0.12237919, 0.13113212, 0.14052892, 0.15050498, 0.16100355, 0.17197546,
+    0.18338793, 0.19520014, 0.20738006, 0.21990121, 0.23274066, 0.2459009,
+    0.25934606, 0.27305848, 0.28703874, 0.30127492, 0.3157365 , 0.33043715,
+    0.34534588, 0.3604517 , 0.37574975, 0.39120759, 0.40682457, 0.42256559,
+    0.4384161 , 0.45435289, 0.47033772, 0.48636436, 0.50238444, 0.51837124,
+    0.53430503, 0.55014493, 0.56586449, 0.58144061, 0.59684533, 0.61205655,
+    0.62705581, 0.64182617, 0.6563544 , 0.67063044, 0.68464533, 0.69839355,
+    0.71187229, 0.72507912, 0.73801226, 0.75067321, 0.76306221, 0.77518048,
+    0.7870294 , 0.79860909, 0.8099203 , 0.82096426, 0.83173724, 0.84223591,
+    0.85245837, 0.86239779, 0.87204252, 0.88137985, 0.89039307, 0.89906019,
+    0.90735227, 0.91523111, 0.92264608, 0.92953025, 0.93579566, 0.94132897,
+    0.9459897 , 0.94961072, 0.95202797, 0.95313791]), array(
+  [ 0.02115217, 0.02435766, 0.02770894, 0.03120549, 0.03486176, 0.03866843,
+    0.04255067, 0.04636284, 0.05011635, 0.05380853, 0.05745319, 0.06105552,
+    0.06460953, 0.06812624, 0.07160913, 0.07505172, 0.07846425, 0.08184654,
+    0.08519339, 0.08851553, 0.09180774, 0.09506735, 0.09830606, 0.10151324,
+    0.10469233, 0.1078471 , 0.11097106, 0.11407044, 0.11714148, 0.12018399,
+    0.12320135, 0.12618924, 0.12915144, 0.13208572, 0.13499318, 0.13787426,
+    0.14072906, 0.14355817, 0.14636281, 0.14914232, 0.1518996 , 0.15463338,
+    0.1573473 , 0.16004005, 0.16271589, 0.16537333, 0.16801777, 0.17064683,
+    0.17326807, 0.1758771 , 0.17848374, 0.1810841 , 0.18368643, 0.18629175,
+    0.18890301, 0.19152787, 0.19416467, 0.19682321, 0.19950818, 0.20222144,
+    0.20497307, 0.20776897, 0.21061428, 0.21351848, 0.21648954, 0.21953572,
+    0.22266685, 0.22589265, 0.22922275, 0.23266724, 0.23623633, 0.23994008,
+    0.24378808, 0.24778908, 0.25195046, 0.25627786, 0.26077448, 0.26544111,
+    0.27027536, 0.27527178, 0.2804218 , 0.28571395, 0.29113426, 0.29666673,
+    0.30229403, 0.30799808, 0.31376071, 0.31956423, 0.3253919 , 0.33122689,
+    0.33705636, 0.34286842, 0.34865273, 0.35440061, 0.36010493, 0.36576003,
+    0.37136152, 0.3769062 , 0.38239183, 0.38781708, 0.39318132, 0.39848456,
+    0.40372731, 0.40891054, 0.41403551, 0.41910295, 0.42411461, 0.42907338,
+    0.43398142, 0.43884093, 0.44365421, 0.4484236 , 0.45315147, 0.45784019,
+    0.46249111, 0.46710703, 0.4716911 , 0.47624563, 0.48077291, 0.48527518,
+    0.4897545 , 0.49421204, 0.49865123, 0.50307409, 0.50748262, 0.51187871,
+    0.51626387, 0.52063984, 0.52500891, 0.5293727 , 0.53373275, 0.5380905,
+    0.54244704, 0.5468042 , 0.55116316, 0.55552505, 0.55989088, 0.56426165,
+    0.56863832, 0.57302164, 0.57741226, 0.58181071, 0.58621776, 0.59063378,
+    0.59505889, 0.59949327, 0.60393701, 0.60839035, 0.61285357, 0.61732602,
+    0.62180742, 0.62629743, 0.63079562, 0.63530232, 0.63981655, 0.64433739,
+    0.64886414, 0.65339601, 0.65793219, 0.66247241, 0.66701578, 0.6715607,
+    0.67610609, 0.68065087, 0.68519387, 0.6897339 , 0.69426971, 0.69880098,
+    0.70332548, 0.7078417 , 0.71234818, 0.71684345, 0.72132594, 0.72579406,
+    0.73024615, 0.73468052, 0.73909539, 0.74348893, 0.74785925, 0.75220438,
+    0.75652229, 0.76081087, 0.76506792, 0.76929116, 0.77347824, 0.77762671,
+    0.78173401, 0.78579751, 0.78981448, 0.79378208, 0.79769739, 0.80155739,
+    0.80535919, 0.80909916, 0.81277394, 0.81638014, 0.81991431, 0.82337227,
+    0.82675067, 0.83004601, 0.8332539 , 0.83637039, 0.83939294, 0.84231614,
+    0.84513786, 0.84785477, 0.85046308, 0.85296202, 0.85534788, 0.85762123,
+    0.85978086, 0.86182713, 0.86376341, 0.86558903, 0.86731067, 0.86893272,
+    0.87045909, 0.87189851, 0.87325813, 0.87454523, 0.87576896, 0.87693806,
+    0.87806108, 0.87914681, 0.88020359, 0.88123929, 0.88226176, 0.88327808,
+    0.88429462, 0.88531766, 0.88635321, 0.8874062 , 0.88848178, 0.88958464,
+    0.89071918, 0.89189007, 0.89310154, 0.89435739, 0.89566299, 0.89702343,
+    0.89844289, 0.89992722, 0.90148396, 0.90312081, 0.90484675, 0.90667256,
+    0.90861145, 0.91068001, 0.91289948, 0.91529744, 0.91790984, 0.92078319,
+    0.92397484, 0.92755584, 0.93160009, 0.93616295]), array(
+  [ 0.00202189, 0.00551406, 0.00964551, 0.01445093, 0.02004956, 0.0264401,
+    0.03362203, 0.04168051, 0.04990578, 0.05800892, 0.06608536, 0.07416013,
+    0.0821575 , 0.0901558 , 0.09817059, 0.10611951, 0.11408814, 0.12206225,
+    0.12997097, 0.13792374, 0.14585317, 0.15371495, 0.16165622, 0.16953288,
+    0.17738626, 0.18527689, 0.19309761, 0.20095943, 0.20879604, 0.21659823,
+    0.22445639, 0.2322505 , 0.24010403, 0.2479312 , 0.25576831, 0.26363933,
+    0.27148133, 0.27940784, 0.28728597, 0.29527021, 0.30321929, 0.31125895,
+    0.3192891 , 0.32740412, 0.33552351, 0.34373191, 0.35194602, 0.36026328,
+    0.36857416, 0.37701245, 0.38544163, 0.39398549, 0.40255076, 0.41117865,
+    0.41988038, 0.42858781, 0.43740831, 0.44624508, 0.45509757, 0.46404412,
+    0.47298933, 0.48193114, 0.49093036, 0.499919  , 0.50887858, 0.51779951,
+    0.52670573, 0.53554666, 0.54429458, 0.55292889, 0.56142589, 0.56975888,
+    0.57789829, 0.58581211, 0.59346653, 0.60082694, 0.60785824, 0.6145276,
+    0.62080425, 0.6266615 , 0.63207769, 0.63703716, 0.64153073, 0.64555596,
+    0.64911698, 0.65222401, 0.65489262, 0.65714283, 0.65899809, 0.66048517,
+    0.66163121, 0.66246411, 0.66301175, 0.6633014 , 0.66335931, 0.66321038,
+    0.66287804, 0.66238411, 0.66174876, 0.66099052, 0.66012634, 0.65917165,
+    0.65814046, 0.6570454 , 0.6558979 , 0.65471   , 0.6534912 , 0.65224774,
+    0.65098687, 0.64971498, 0.64843772, 0.64716007, 0.64588635, 0.64462029,
+    0.64336792, 0.64213086, 0.64090915, 0.63970452, 0.6385183 , 0.6373514,
+    0.63620481, 0.63508181, 0.63397823, 0.63289364, 0.63182733, 0.63077828,
+    0.62974631, 0.62873002, 0.62772575, 0.62673148, 0.62574501, 0.62476397,
+    0.62378672, 0.62280874, 0.62182708, 0.62083861, 0.61984011, 0.61882799,
+    0.61779832, 0.61674751, 0.61567191, 0.6145678 , 0.61343038, 0.61225577,
+    0.61104058, 0.60978098, 0.60847311, 0.60711217, 0.60569296, 0.60421341,
+    0.6026698 , 0.60105844, 0.59937571, 0.59761482, 0.5957737 , 0.59385008,
+    0.59184061, 0.58974201, 0.5875511 , 0.58526193, 0.58287108, 0.58037826,
+    0.57778061, 0.57507533, 0.57225971, 0.56933109, 0.5662869 , 0.56311922,
+    0.55983012, 0.55641792, 0.55288035, 0.5492152 , 0.54542033, 0.54149367,
+    0.53743319, 0.53323696, 0.5289031 , 0.52442983, 0.51981546, 0.51505838,
+    0.51015712, 0.50511032, 0.4999168 , 0.49457554, 0.4890857 , 0.48344672,
+    0.47765827, 0.47172037, 0.46563338, 0.45939808, 0.45301574, 0.44648817,
+    0.43980205, 0.43297192, 0.42600399, 0.4189032 , 0.41167556, 0.40430087,
+    0.39681183, 0.38922019, 0.38152054, 0.37372536, 0.36586902, 0.35794158,
+    0.34998244, 0.34201236, 0.33404905, 0.32613936, 0.31830265, 0.31059314,
+    0.30304889, 0.29571869, 0.28866636, 0.28193158, 0.27559011, 0.26970074,
+    0.26431775, 0.25950931, 0.25533028, 0.25182957, 0.24905318, 0.24703636,
+    0.24580423, 0.24537215, 0.2457446 , 0.24691602, 0.2488721 , 0.25159047,
+    0.25504271, 0.25919611, 0.26401499, 0.26946277, 0.27550279, 0.28209979,
+    0.28922076, 0.29683505, 0.30491583, 0.31344099, 0.32238968, 0.33174591,
+    0.34150092, 0.35164825, 0.36218311, 0.37310746, 0.38442836, 0.39615832,
+    0.40831552, 0.42092364, 0.4340111 , 0.44760871, 0.46174485, 0.47643618,
+    0.49169001, 0.50743951, 0.52351526, 0.53960817]), np.ones(256))
+
+# Used to reconstruct the colormap in viscm
+parameters = {'xp': [-2.3569023569023386, 29.24031986531989, 21.948653198653204, -25.44718013468011, -4.78745791245791],
+              'yp': [-27.604166666666657, -30.642361111111086, 24.652777777777771, -13.6284722222222, 23.4375],
+              'min_Jp': 15,
+              'max_Jp': 95}
+
+color_map_luts['cm_candidate_kk'] = (array(
+  [ 0.07873808, 0.08503098, 0.09119215, 0.09725944, 0.10324966, 0.10914691,
+    0.1149903 , 0.12076614, 0.12647234, 0.13214487, 0.13775951, 0.14331952,
+    0.14885405, 0.15434127, 0.15978387, 0.16520148, 0.17058327, 0.17592717,
+    0.1812416 , 0.18653223, 0.19178949, 0.19701509, 0.20221806, 0.20739605,
+    0.21254477, 0.21766522, 0.22276163, 0.22783646, 0.232884  , 0.23790477,
+    0.24289917, 0.24786997, 0.25281796, 0.25773939, 0.26263436, 0.26750288,
+    0.27234491, 0.27716076, 0.28195253, 0.28671682, 0.29145343, 0.29616211,
+    0.30084257, 0.30549451, 0.31011758, 0.31471143, 0.31927567, 0.32380992,
+    0.32831456, 0.3327882 , 0.33723043, 0.34164086, 0.34601907, 0.35036466,
+    0.35467722, 0.35895634, 0.36320162, 0.36741265, 0.37158905, 0.37573041,
+    0.37983636, 0.38390652, 0.38794052, 0.391938  , 0.3958986 , 0.39982199,
+    0.40370783, 0.40755579, 0.41136559, 0.41513702, 0.41886962, 0.42256312,
+    0.42621724, 0.42983171, 0.43340628, 0.43694071, 0.44043477, 0.44388826,
+    0.44730096, 0.4506727 , 0.4540033 , 0.45729265, 0.46054056, 0.46374691,
+    0.4669116 , 0.47003456, 0.47311572, 0.47615505, 0.47915255, 0.48210822,
+    0.48502208, 0.4878942 , 0.49072469, 0.49351365, 0.49626124, 0.49896763,
+    0.50163303, 0.50425765, 0.50684177, 0.50938571, 0.51188977, 0.51435433,
+    0.51677977, 0.5191665 , 0.52151498, 0.52382573, 0.52609922, 0.52833601,
+    0.53053664, 0.53270171, 0.53483184, 0.53692768, 0.53898991, 0.54101933,
+    0.54301655, 0.5449823 , 0.54691736, 0.54882248, 0.55069849, 0.55254618,
+    0.55436641, 0.55616019, 0.55792828, 0.55967151, 0.56139081, 0.56308707,
+    0.56476124, 0.56641427, 0.56804712, 0.56966078, 0.57125625, 0.57283474,
+    0.5743971 , 0.57594437, 0.57747763, 0.578998  , 0.58050659, 0.58200457,
+    0.58349311, 0.58497344, 0.58644679, 0.58791447, 0.58937777, 0.59083808,
+    0.59229669, 0.59375499, 0.59521431, 0.59667599, 0.5981412 , 0.59961095,
+    0.60108588, 0.60256604, 0.60405059, 0.60553731, 0.60702199, 0.60849757,
+    0.60995371, 0.61137672, 0.61275043, 0.61405949, 0.61529472, 0.61645863,
+    0.61756755, 0.6186476 , 0.61972621, 0.62082374, 0.62195065, 0.62310898,
+    0.62429421, 0.62549895, 0.62671518, 0.62793547, 0.62915284, 0.63036156,
+    0.63155892, 0.63274216, 0.63390941, 0.63505915, 0.6361885 , 0.63730024,
+    0.63839517, 0.63947435, 0.64053552, 0.64158527, 0.64262677, 0.64365947,
+    0.64469056, 0.6457271 , 0.6467694 , 0.6478306 , 0.64891699, 0.65003829,
+    0.65120839, 0.65243764, 0.6537444 , 0.65514254, 0.65665209, 0.65829045,
+    0.66007696, 0.66202922, 0.66416348, 0.66649284, 0.66902763, 0.67177387,
+    0.67473363, 0.6779068 , 0.68128823, 0.68487229, 0.68865042, 0.69261428,
+    0.69675486, 0.70106274, 0.7055261 , 0.71013753, 0.71488908, 0.71977295,
+    0.72478197, 0.72990967, 0.73515031, 0.74049508, 0.74593782, 0.75147747,
+    0.75711082, 0.76283528, 0.76864883, 0.77453293, 0.78049489, 0.78653899,
+    0.79266478, 0.7988389 , 0.80509156, 0.81142348, 0.81779745, 0.82424433,
+    0.83076477, 0.83731914, 0.84395228, 0.85063328, 0.8573683 , 0.86417388,
+    0.87100664, 0.87792232, 0.88485711, 0.89186942, 0.898911  , 0.90601831,
+    0.91316089, 0.92036241, 0.92760063, 0.93489628, 0.94222522, 0.94961559,
+    0.95703072, 0.96451696, 0.97201416, 0.97959794]), array(
+  [ 0.02380049, 0.02762946, 0.0314955 , 0.03538367, 0.03929263, 0.04314916,
+    0.04681625, 0.05034685, 0.05376738, 0.05706764, 0.06028584, 0.06343363,
+    0.06649987, 0.06951333, 0.0724811 , 0.07539619, 0.07827446, 0.0811238,
+    0.08394364, 0.08673511, 0.08950972, 0.0922702 , 0.09501404, 0.0977463,
+    0.10047279, 0.10319545, 0.10591402, 0.10862929, 0.11134673, 0.11406773,
+    0.11679361, 0.11952425, 0.12226063, 0.12500588, 0.12776095, 0.13052669,
+    0.13330389, 0.13609311, 0.13889411, 0.1417089 , 0.14453802, 0.14738192,
+    0.15024103, 0.15311572, 0.15600633, 0.15891314, 0.1618364 , 0.16477634,
+    0.16773294, 0.17070662, 0.17369747, 0.17670558, 0.17973101, 0.18277379,
+    0.18583395, 0.18891149, 0.19200641, 0.19511868, 0.19824828, 0.20139517,
+    0.20455931, 0.20774066, 0.21093915, 0.21415474, 0.21738737, 0.22063697,
+    0.22390351, 0.22718691, 0.23048713, 0.23380417, 0.23713792, 0.24048832,
+    0.2438553 , 0.24723882, 0.25063881, 0.25405521, 0.25748797, 0.26093702,
+    0.26440229, 0.26788372, 0.27138123, 0.27489488, 0.27842445, 0.28196986,
+    0.28553099, 0.28910775, 0.29270002, 0.29630767, 0.29993058, 0.30356863,
+    0.30722174, 0.31088964, 0.31457215, 0.3182691 , 0.32198029, 0.32570551,
+    0.32944456, 0.33319721, 0.33696322, 0.34074232, 0.34453424, 0.34833872,
+    0.35215547, 0.35598423, 0.35982468, 0.36367647, 0.36753933, 0.37141294,
+    0.37529698, 0.37919113, 0.38309506, 0.38700843, 0.39093088, 0.394862,
+    0.39880154, 0.40274917, 0.40670453, 0.41066731, 0.41463715, 0.41861372,
+    0.42259669, 0.4265856 , 0.43058022, 0.43458024, 0.43858535, 0.44259523,
+    0.44660955, 0.45062802, 0.45465032, 0.45867614, 0.46270517, 0.466737,
+    0.47077145, 0.47480823, 0.47884705, 0.48288761, 0.48692962, 0.4909728,
+    0.49501685, 0.49906149, 0.50310643, 0.5071514 , 0.51119613, 0.51524033,
+    0.51928381, 0.52332635, 0.52736778, 0.531408  , 0.53544701, 0.53948493,
+    0.54352212, 0.54755923, 0.55159738, 0.55563825, 0.55968439, 0.56373935,
+    0.56780781, 0.57189523, 0.57600725, 0.58014816, 0.58431885, 0.58851523,
+    0.59272844, 0.59694739, 0.6011624 , 0.6053679 , 0.60956269, 0.61374861,
+    0.61792946, 0.62210956, 0.62629294, 0.63048304, 0.63468291, 0.63889495,
+    0.64312026, 0.64735999, 0.65161492, 0.65588577, 0.66017389, 0.66447826,
+    0.66879882, 0.67313546, 0.67748957, 0.68185887, 0.68624266, 0.69064192,
+    0.69505448, 0.69947837, 0.70391466, 0.70835911, 0.71281063, 0.71726683,
+    0.72172374, 0.72617917, 0.73062754, 0.73506547, 0.73948684, 0.74388673,
+    0.74825915, 0.7525986 , 0.75689972, 0.76115802, 0.76536957, 0.76953177,
+    0.77364343, 0.77770289, 0.78171225, 0.78567138, 0.78958223, 0.79344761,
+    0.79726965, 0.80105066, 0.8047929 , 0.80849913, 0.81217164, 0.81581257,
+    0.81942391, 0.82300747, 0.82656492, 0.83009816, 0.83360887, 0.83709793,
+    0.84056637, 0.844015  , 0.84744449, 0.85085836, 0.85425577, 0.85763616,
+    0.8609997 , 0.8643531 , 0.86769067, 0.87101222, 0.87432566, 0.87762463,
+    0.88090892, 0.88418748, 0.88745026, 0.89070437, 0.89394846, 0.89717856,
+    0.90040525, 0.90361483, 0.90682325, 0.91001579, 0.9132048 , 0.91638083,
+    0.9195519 , 0.92271171, 0.92586627, 0.92900992, 0.93214934, 0.93527665,
+    0.93840226, 0.94151285, 0.9446259 , 0.94771918]), array(
+  [ 0.45890713, 0.46137905, 0.46384563, 0.46630529, 0.46875421, 0.4711862,
+    0.47360008, 0.47599069, 0.47835461, 0.48068977, 0.48299219, 0.4852595,
+    0.48748847, 0.48967699, 0.49182306, 0.49392292, 0.4959753 , 0.49797881,
+    0.4999306 , 0.50182767, 0.50367037, 0.5054572 , 0.50718436, 0.50885097,
+    0.51045736, 0.51200237, 0.51348338, 0.51489809, 0.51624834, 0.51753338,
+    0.51875253, 0.51990356, 0.52098523, 0.52199968, 0.52294672, 0.52382624,
+    0.52463824, 0.52538248, 0.52605675, 0.52666444, 0.52720607, 0.52768224,
+    0.5280937 , 0.5284413 , 0.52872599, 0.52894886, 0.52911108, 0.5292139,
+    0.52925736, 0.52924451, 0.5291769 , 0.52905617, 0.52888406, 0.52866235,
+    0.5283929 , 0.52807762, 0.52771851, 0.52731757, 0.52687689, 0.52639856,
+    0.52588473, 0.52533756, 0.52475925, 0.524152  , 0.52351804, 0.5228596,
+    0.52217891, 0.5214782 , 0.52075951, 0.52002465, 0.51927646, 0.51851715,
+    0.51774891, 0.51697393, 0.51619438, 0.5154124 , 0.5146301 , 0.51384958,
+    0.51307291, 0.51230212, 0.51153923, 0.51078544, 0.51004343, 0.50931521,
+    0.50860267, 0.50790763, 0.50723191, 0.50657725, 0.50594539, 0.50533791,
+    0.50475611, 0.50420208, 0.50367737, 0.50318346, 0.50272179, 0.50229376,
+    0.50190069, 0.5015438 , 0.50122435, 0.50094363, 0.50070274, 0.50050273,
+    0.50034459, 0.50022925, 0.50015758, 0.50013055, 0.50014881, 0.50021302,
+    0.50032381, 0.50048172, 0.50068726, 0.50094086, 0.50124298, 0.50159412,
+    0.50199429, 0.50244368, 0.50294243, 0.50349062, 0.5040883 , 0.50473544,
+    0.50543197, 0.50617805, 0.5069733 , 0.50781742, 0.50871016, 0.5096512,
+    0.51064021, 0.51167679, 0.51276051, 0.5138909 , 0.51506743, 0.51628978,
+    0.51755709, 0.51886873, 0.52022401, 0.52162216, 0.52306241, 0.52454388,
+    0.52606564, 0.52762668, 0.5292259 , 0.53086207, 0.53253385, 0.53423975,
+    0.53597803, 0.53774675, 0.53954371, 0.54136636, 0.54321177, 0.54507652,
+    0.54695667, 0.54884775, 0.55074476, 0.55264242, 0.55453558, 0.55642011,
+    0.55829453, 0.56016223, 0.56203375, 0.56392814, 0.56587122, 0.56788936,
+    0.57000019, 0.57220545, 0.57449055, 0.57683063, 0.57919892, 0.58157275,
+    0.58393538, 0.58627588, 0.58858776, 0.59086745, 0.59311274, 0.59532221,
+    0.59749564, 0.59963222, 0.60173091, 0.60379008, 0.60580649, 0.60777924,
+    0.6097056 , 0.61158212, 0.61340237, 0.6151645 , 0.61686366, 0.61849128,
+    0.62004292, 0.62151305, 0.62288934, 0.62416816, 0.62533879, 0.62639158,
+    0.62731938, 0.6281096 , 0.62875725, 0.62925097, 0.62958736, 0.62976024,
+    0.62976899, 0.62961438, 0.62930119, 0.62883586, 0.62822926, 0.62749232,
+    0.62663432, 0.62567778, 0.62461908, 0.62348256, 0.62227864, 0.621005,
+    0.6196701 , 0.61828149, 0.61685751, 0.61539115, 0.61388335, 0.61233673,
+    0.61075292, 0.60913269, 0.607476  , 0.60579314, 0.60408651, 0.60234546,
+    0.60056748, 0.59874959, 0.59688839, 0.59502121, 0.59312158, 0.59117279,
+    0.58917064, 0.58718501, 0.58514178, 0.58303558, 0.58094288, 0.57879227,
+    0.5765793 , 0.57438349, 0.57210749, 0.56981243, 0.56748232, 0.56508082,
+    0.56269289, 0.56020367, 0.55773849, 0.5551784 , 0.55261699, 0.54997983,
+    0.54732492, 0.54460356, 0.54185741, 0.53904386, 0.53620824, 0.53329363,
+    0.53036982, 0.52734442, 0.52433316, 0.52118636]), np.ones(256))
+
 # Aliases
 color_map_luts['B-W LINEAR'] = color_map_luts['idl00']
 color_map_luts['BLUE'] = color_map_luts['idl01']


https://bitbucket.org/yt_analysis/yt/commits/3fdcbac38c1b/
Changeset:   3fdcbac38c1b
Branch:      yt
User:        MatthewTurk
Date:        2016-01-28 16:55:17+00:00
Summary:     Adding Cameron's candidate
Affected #:  1 file

diff -r cb86fb5fa4e2376621b0c03cdf930fa2071c446d -r 3fdcbac38c1b95f01f49bb80d8dc5d4745349a44 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -8232,6 +8232,149 @@
     0.54732492, 0.54460356, 0.54185741, 0.53904386, 0.53620824, 0.53329363,
     0.53036982, 0.52734442, 0.52433316, 0.52118636]), np.ones(256))
 
+# Used to reconstruct the colormap in viscm
+parameters = {'xp': [6.4995757388238928, -16.241760894839473,
+                -12.632024921242106, -21.656364855235495, 7.5824965309031143,
+                6.4995757388238928, 86.274740755325524, 15.884889270177041,
+                -11.188130531803154, 3.9727605573057474],
+              'yp': [-0.7838283828382373, -30.022689768976846,
+                -9.447194719471895, 6.7966171617162274, -0.7838283828382373,
+                20.152640264026445, 37.840346534653492, 13.294141914191471,
+                40.728135313531396, -0.7838283828382373],
+              'min_Jp': 3.96624472574,
+              'max_Jp': 96.2869198312}
+
+color_map_luts['cm_candidate_ch'] = (array(
+  [ 0.03522636, 0.03833067, 0.04137086, 0.04422592, 0.0469077 , 0.04949927,
+    0.05195494, 0.05435346, 0.05668617, 0.05895159, 0.06118936, 0.06333705,
+    0.0654643 , 0.06747923, 0.06945627, 0.07130747, 0.07309864, 0.07473997,
+    0.07630552, 0.0776979 , 0.07898537, 0.08010829, 0.08107311, 0.08190198,
+    0.08249749, 0.08292767, 0.083176  , 0.08316237, 0.08291788, 0.08242916,
+    0.08166884, 0.08060568, 0.07920431, 0.0774235 , 0.07522534, 0.07255839,
+    0.06937739, 0.0656378 , 0.06127894, 0.05615142, 0.05030332, 0.04372508,
+    0.03661499, 0.03005334, 0.02491817, 0.02190702, 0.02148032, 0.02375703,
+    0.02860561, 0.03577661, 0.04479347, 0.05435906, 0.06405802, 0.073712,
+    0.083229  , 0.09254701, 0.10166185, 0.11057105, 0.11925526, 0.12774985,
+    0.13603271, 0.14414105, 0.15206539, 0.15982306, 0.16742795, 0.17487481,
+    0.1821786 , 0.18934939, 0.19639219, 0.20330755, 0.21010013, 0.21677862,
+    0.22334613, 0.22980539, 0.2361588 , 0.24240872, 0.24855784, 0.25461006,
+    0.26057194, 0.26645523, 0.27228068, 0.27808294, 0.28391451, 0.28984425,
+    0.29594531, 0.30227382, 0.30885036, 0.31565791, 0.32265606, 0.32979907,
+    0.33704768, 0.34437291, 0.35175534, 0.35917963, 0.36664001, 0.37413354,
+    0.38165767, 0.38921099, 0.39679271, 0.40440245, 0.41203793, 0.41969968,
+    0.42738983, 0.43510843, 0.44285558, 0.45063135, 0.45843459, 0.46626598,
+    0.47412702, 0.48201778, 0.48993829, 0.49788858, 0.50586861, 0.51387941,
+    0.52192105, 0.52999354, 0.53809685, 0.54623165, 0.55439949, 0.56259924,
+    0.57083088, 0.57909441, 0.58738981, 0.59571933, 0.60408406, 0.61248174,
+    0.62091239, 0.62937605, 0.63787278, 0.64640265, 0.65496986, 0.6635733,
+    0.67221077, 0.68088234, 0.68958809, 0.69832812, 0.70710251, 0.71591134,
+    0.72475464, 0.73363239, 0.74254444, 0.75149049, 0.76046998, 0.76948199,
+    0.77852511, 0.7875972 , 0.79669513, 0.80581439, 0.81497372, 0.82415011,
+    0.83332691, 0.84252673, 0.85170847, 0.86088203, 0.86999225, 0.87899789,
+    0.88778843, 0.89611225, 0.90337493, 0.90860929, 0.91175598, 0.91370041,
+    0.91501944, 0.91599762, 0.91675056, 0.91732023, 0.91778036, 0.91811232,
+    0.91837528, 0.91855668, 0.91865995, 0.91869408, 0.91866521, 0.91857744,
+    0.91843343, 0.91823488, 0.91798277, 0.91767766, 0.91731981, 0.91690936,
+    0.91644637, 0.91593091, 0.91536311, 0.91474318, 0.91407949, 0.91336534,
+    0.91259789, 0.9117775 , 0.91090473, 0.90998027, 0.90900929, 0.90798541,
+    0.90690915, 0.90578172, 0.90460452, 0.90337725, 0.90209964, 0.90077411,
+    0.8994031 , 0.89798939, 0.89653293, 0.89503377, 0.8935014 , 0.89194155,
+    0.89036098, 0.88876764, 0.88716861, 0.88557213, 0.88399934, 0.88246708,
+    0.88099466, 0.87960391, 0.87831891, 0.87716549, 0.87617292, 0.87537354,
+    0.87479129, 0.87444814, 0.87436113, 0.87454145, 0.87499843, 0.87576485,
+    0.87680691, 0.87811326, 0.87968158, 0.88153454, 0.88361008, 0.88590523,
+    0.88844166, 0.89115133, 0.89406565, 0.89714468, 0.90038245, 0.90377593,
+    0.907298  , 0.91095738, 0.91472653, 0.91860958, 0.92259009, 0.92665895,
+    0.93081455, 0.93503101, 0.93931993, 0.9436451 , 0.94800146, 0.9523663,
+    0.95670453, 0.96101117, 0.96524557, 0.96939757, 0.97351615, 0.97761533,
+    0.9817668 , 0.98605907, 0.99050657, 0.99513559]), array(
+  [ 0.00717629, 0.00941214, 0.01190817, 0.01466782, 0.01769325, 0.02096064,
+    0.02448619, 0.02824312, 0.03223296, 0.0364546 , 0.04088298, 0.04530612,
+    0.04965416, 0.05396345, 0.05821592, 0.06244244, 0.06662853, 0.07080188,
+    0.07494638, 0.07908871, 0.08321444, 0.08733926, 0.09146285, 0.09558174,
+    0.09971703, 0.10385498, 0.10799951, 0.11216606, 0.11634821, 0.12054804,
+    0.12476983, 0.12901801, 0.13329717, 0.13761219, 0.1419667 , 0.14636562,
+    0.15081199, 0.1553078 , 0.1598563 , 0.16447093, 0.16913797, 0.17385302,
+    0.17859758, 0.18333009, 0.18798922, 0.19250917, 0.19683753, 0.20095108,
+    0.20485138, 0.2085565 , 0.21208884, 0.21547059, 0.2187219 , 0.22186023,
+    0.22490038, 0.22785619, 0.23073769, 0.23355385, 0.23631507, 0.23902539,
+    0.2416948 , 0.24432528, 0.24692442, 0.24949553, 0.25204168, 0.25456886,
+    0.25707918, 0.25957507, 0.26205962, 0.26453663, 0.26700889, 0.26947802,
+    0.27194668, 0.2744176 , 0.27689351, 0.27937719, 0.28187144, 0.28437889,
+    0.28690162, 0.28944039, 0.2919933 , 0.29455367, 0.29710773, 0.29963342,
+    0.30210294, 0.30448964, 0.30677635, 0.3089592 , 0.31104521, 0.31304651,
+    0.31497559, 0.31684275, 0.31865561, 0.32042051, 0.32214032, 0.32381667,
+    0.32545088, 0.32704374, 0.32859564, 0.33010679, 0.33157808, 0.33300923,
+    0.33439917, 0.33574763, 0.33705434, 0.33831896, 0.33954175, 0.34072209,
+    0.34185885, 0.34295161, 0.34399994, 0.34500336, 0.34596145, 0.34687315,
+    0.34773789, 0.34855512, 0.34932428, 0.35004438, 0.35071386, 0.35133271,
+    0.35190023, 0.35241573, 0.35287848, 0.35328622, 0.35363738, 0.35393254,
+    0.35417082, 0.35435127, 0.3544729 , 0.35453467, 0.35453246, 0.3544659,
+    0.35433547, 0.35413995, 0.35387806, 0.35354849, 0.35314988, 0.35268084,
+    0.35213998, 0.35152592, 0.35083734, 0.35007306, 0.34923213, 0.34831394,
+    0.34731844, 0.34624641, 0.34509977, 0.34388221, 0.3425743 , 0.34119867,
+    0.33977344, 0.33827666, 0.33675521, 0.33520455, 0.33369577, 0.33229408,
+    0.3311592 , 0.33064529, 0.3315562 , 0.33510297, 0.34120532, 0.34863721,
+    0.35665329, 0.36490048, 0.37324447, 0.38164035, 0.39000738, 0.39837187,
+    0.4066732 , 0.41493037, 0.42314374, 0.43130788, 0.43942037, 0.44748078,
+    0.4554899 , 0.46344929, 0.4713609 , 0.47922684, 0.48704928, 0.49483028,
+    0.50257176, 0.51027545, 0.51794292, 0.52557548, 0.53316833, 0.54072793,
+    0.5482575 , 0.55575771, 0.56322906, 0.57067187, 0.57808347, 0.58546876,
+    0.59282791, 0.60016055, 0.60746619, 0.61474529, 0.6219982 , 0.62922358,
+    0.6364201 , 0.64358621, 0.65072187, 0.65782681, 0.66489576, 0.67192551,
+    0.67891244, 0.68585233, 0.69274151, 0.69957569, 0.70634483, 0.71304131,
+    0.71965675, 0.72618213, 0.73260803, 0.73892495, 0.7451228 , 0.75119134,
+    0.75712447, 0.76291767, 0.76856894, 0.77407887, 0.77944929, 0.78467669,
+    0.78977658, 0.79475738, 0.79962537, 0.80438334, 0.8090495 , 0.81362981,
+    0.81812939, 0.822561  , 0.82692955, 0.83124292, 0.83550709, 0.83972927,
+    0.84391222, 0.84806526, 0.8521879 , 0.85628939, 0.8603707 , 0.86443499,
+    0.86849289, 0.8725333 , 0.87658679, 0.88063259, 0.88469332, 0.88877587,
+    0.89287903, 0.8970406 , 0.90125214, 0.90552454, 0.90987168, 0.91426387,
+    0.91867981, 0.92310252, 0.92749634, 0.93184435]), array(
+  [ 0.01748575, 0.02188322, 0.02672053, 0.0320049 , 0.0377461 , 0.04374005,
+    0.04967379, 0.05549667, 0.06125422, 0.06697896, 0.07265592, 0.07836626,
+    0.08404944, 0.08980628, 0.09555785, 0.10139442, 0.1072383 , 0.1131781,
+    0.11912453, 0.12517083, 0.13123399, 0.13736417, 0.14354194, 0.14973423,
+    0.15601774, 0.16231425, 0.16862456, 0.17499629, 0.1813827 , 0.18777277,
+    0.19416216, 0.20054403, 0.20690794, 0.21323963, 0.21951507, 0.22570772,
+    0.23177787, 0.23767532, 0.24334283, 0.24872311, 0.25369558, 0.25814289,
+    0.26191917, 0.26487715, 0.26692571, 0.26808099, 0.2684773 , 0.26831057,
+    0.26778035, 0.26703712, 0.26618953, 0.26531302, 0.26445711, 0.26365339,
+    0.26292142, 0.26228611, 0.26174391, 0.26129748, 0.26096479, 0.26072575,
+    0.26060175, 0.26057155, 0.26064668, 0.26081902, 0.2610823 , 0.26144311,
+    0.26189315, 0.26242706, 0.26304235, 0.26373904, 0.26451357, 0.26535872,
+    0.26627022, 0.2672428 , 0.26826982, 0.26934258, 0.27044939, 0.27157424,
+    0.27269501, 0.27378145, 0.27479358, 0.27568221, 0.27639444, 0.27688646,
+    0.27714116, 0.27717994, 0.27705717, 0.27683892, 0.27658103, 0.27631892,
+    0.27606894, 0.27583456, 0.27561248, 0.27539915, 0.27518661, 0.27496758,
+    0.27473682, 0.27448999, 0.27422348, 0.27393436, 0.27362188, 0.27328355,
+    0.2729159 , 0.27251738, 0.27208666, 0.27162255, 0.27112496, 0.27059242,
+    0.2700227 , 0.26941486, 0.26876801, 0.26808128, 0.26735382, 0.26658388,
+    0.26577047, 0.26491267, 0.26400955, 0.26305951, 0.26206006, 0.26101115,
+    0.25991162, 0.25876029, 0.25755586, 0.25629478, 0.25497448, 0.25359554,
+    0.25215625, 0.25065475, 0.24908901, 0.24745683, 0.24575146, 0.24397141,
+    0.24211619, 0.24018264, 0.23816727, 0.23606623, 0.23387523, 0.23158951,
+    0.22920377, 0.22671207, 0.22410782, 0.22138361, 0.21853118, 0.2155413,
+    0.21240363, 0.20910667, 0.20563757, 0.20198213, 0.19808683, 0.19395191,
+    0.18956227, 0.18482577, 0.17973484, 0.17416272, 0.16804944, 0.16121776,
+    0.15345263, 0.14450283, 0.13440947, 0.124886  , 0.11850527, 0.11530169,
+    0.11429895, 0.11482246, 0.11642615, 0.11883084, 0.12186164, 0.12538522,
+    0.12930681, 0.1335561 , 0.13808108, 0.14284015, 0.1478005 , 0.15293632,
+    0.15822735, 0.16365776, 0.16921525, 0.1748903 , 0.1806757 , 0.18656606,
+    0.19255748, 0.1986473 , 0.2048339 , 0.2111165 , 0.21749036, 0.22396009,
+    0.23052897, 0.23719906, 0.2439729 , 0.25085347, 0.25784138, 0.26494541,
+    0.27217054, 0.27952188, 0.28700505, 0.29462764, 0.30239803, 0.31032349,
+    0.31841207, 0.3266725 , 0.33511695, 0.34375899, 0.35260525, 0.36166667,
+    0.37095463, 0.38048072, 0.39025901, 0.40030417, 0.41061811, 0.42120758,
+    0.43207661, 0.44322563, 0.45465067, 0.46634268, 0.47828317, 0.49044181,
+    0.50279317, 0.51530648, 0.52795044, 0.54069578, 0.55350777, 0.56628119,
+    0.57905927, 0.59183607, 0.60457689, 0.61715156, 0.62970523, 0.64220124,
+    0.65448412, 0.66677747, 0.67887524, 0.6909046 , 0.70283761, 0.71461871,
+    0.72635938, 0.73792219, 0.74945264, 0.76083717, 0.77214931, 0.7834029,
+    0.79449887, 0.80567531, 0.81656784, 0.82755657, 0.83843574, 0.84923937,
+    0.86013357, 0.87076849, 0.88140532, 0.89198331, 0.90217129, 0.91220561,
+    0.922003  , 0.93137403, 0.94060562, 0.94975586]), np.ones(256))
+
 # Aliases
 color_map_luts['B-W LINEAR'] = color_map_luts['idl00']
 color_map_luts['BLUE'] = color_map_luts['idl01']


https://bitbucket.org/yt_analysis/yt/commits/88d35715b7c8/
Changeset:   88d35715b7c8
Branch:      yt
User:        MatthewTurk
Date:        2016-01-28 17:03:03+00:00
Summary:     Anonymizing candidates
Affected #:  1 file

diff -r 3fdcbac38c1b95f01f49bb80d8dc5d4745349a44 -r 88d35715b7c871663a57994384acd76872b02b76 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7824,7 +7824,7 @@
                       -16.059027777777771],
                'min_Jp': 17.1875,
                'max_Jp': 82.1875}
-color_map_luts["cm_candidate_mjt"] = (array(
+color_map_luts["cm_candidate_1"] = (array(
   [ 0.01845663, 0.01940818, 0.02066025, 0.02218966, 0.02395409, 0.02595033,
     0.02817596, 0.03060653, 0.03322304, 0.03602798, 0.03900455, 0.04208415,
     0.04516324, 0.04823603, 0.05128648, 0.05431253, 0.05730541, 0.06025524,
@@ -7964,7 +7964,7 @@
               'max_Jp': 93.8863000932}
 
 
-color_map_luts["cm_candidate_ng"] = (array(
+color_map_luts["cm_candidate_2"] = (array(
   [ 0.22330277, 0.22677033, 0.23017935, 0.23353169, 0.23681402, 0.2400368,
     0.24320742, 0.24631505, 0.24936304, 0.25236366, 0.25530723, 0.25819299,
     0.2610367 , 0.26382794, 0.26656596, 0.26926798, 0.2719204 , 0.27452761,
@@ -8101,7 +8101,7 @@
               'min_Jp': 15,
               'max_Jp': 95}
 
-color_map_luts['cm_candidate_kk'] = (array(
+color_map_luts['cm_candidate_3'] = (array(
   [ 0.07873808, 0.08503098, 0.09119215, 0.09725944, 0.10324966, 0.10914691,
     0.1149903 , 0.12076614, 0.12647234, 0.13214487, 0.13775951, 0.14331952,
     0.14885405, 0.15434127, 0.15978387, 0.16520148, 0.17058327, 0.17592717,
@@ -8244,7 +8244,7 @@
               'min_Jp': 3.96624472574,
               'max_Jp': 96.2869198312}
 
-color_map_luts['cm_candidate_ch'] = (array(
+color_map_luts['cm_candidate_4'] = (array(
   [ 0.03522636, 0.03833067, 0.04137086, 0.04422592, 0.0469077 , 0.04949927,
     0.05195494, 0.05435346, 0.05668617, 0.05895159, 0.06118936, 0.06333705,
     0.0654643 , 0.06747923, 0.06945627, 0.07130747, 0.07309864, 0.07473997,


https://bitbucket.org/yt_analysis/yt/commits/7a41688d7c0f/
Changeset:   7a41688d7c0f
Branch:      yt
User:        jzuhone
Date:        2016-03-02 19:51:35+00:00
Summary:     Merge
Affected #:  290 files

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -28,40 +28,38 @@
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/basic_octree.c
 yt/utilities/lib/bitarray.c
-yt/utilities/lib/CICDeposit.c
-yt/utilities/lib/ContourFinding.c
-yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/bounding_volume_hierarchy.c
+yt/utilities/lib/contour_finding.c
+yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/element_mappings.c
-yt/utilities/lib/FixedInterpolator.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
 yt/utilities/lib/image_utilities.c
-yt/utilities/lib/Interpolators.c
+yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
 yt/utilities/lib/line_integral_convolution.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_intersection.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
-yt/utilities/lib/Octree.c
-yt/utilities/lib/GridTree.c
+yt/utilities/lib/particle_mesh_operations.c
 yt/utilities/lib/origami.c
+yt/utilities/lib/particle_mesh_operations.c
 yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
-yt/utilities/lib/PointsInVolume.c
-yt/utilities/lib/QuadTree.c
-yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/points_in_volume.c
+yt/utilities/lib/quad_tree.c
+yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
-yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
-yt/utilities/lib/element_mappings.c
-yt/utilities/lib/mesh_construction.cpp
-yt/utilities/lib/mesh_samplers.cpp
-yt/utilities/lib/mesh_traversal.cpp
-yt/utilities/lib/mesh_intersection.cpp
 syntax: glob
 *.pyc
 .*.swp

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 clean.sh
--- a/clean.sh
+++ b/clean.sh
@@ -1,4 +1,1 @@
-find . -name "*.so" -exec rm -v {} \;
-find . -name "*.pyc" -exec rm -v {} \;
-find . -name "__config__.py" -exec rm -v {} \;
-rm -rvf build dist
+hg --config extensions.purge= purge --all yt

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ /dev/null
@@ -1,241 +0,0 @@
-import errno
-import os
-import shutil
-import string
-import re
-import tempfile
-import uuid
-from sphinx.util.compat import Directive
-from docutils import nodes
-from docutils.parsers.rst import directives
-from IPython.config import Config
-from IPython.nbconvert import html, python
-from IPython.nbformat import current as nbformat
-from runipy.notebook_runner import NotebookRunner, NotebookError
-
-class NotebookDirective(Directive):
-    """Insert an evaluated notebook into a document
-
-    This uses runipy and nbconvert to transform a path to an unevaluated notebook
-    into html suitable for embedding in a Sphinx document.
-    """
-    required_arguments = 1
-    optional_arguments = 1
-    option_spec = {'skip_exceptions': directives.flag}
-    final_argument_whitespace = True
-
-    def run(self): # check if there are spaces in the notebook name
-        nb_path = self.arguments[0]
-        if ' ' in nb_path: raise ValueError(
-            "Due to issues with docutils stripping spaces from links, white "
-            "space is not allowed in notebook filenames '{0}'".format(nb_path))
-        # check if raw html is supported
-        if not self.state.document.settings.raw_enabled:
-            raise self.warning('"%s" directive disabled.' % self.name)
-
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        os.chdir(tmpdir)
-
-        # get path to notebook
-        nb_filename = self.arguments[0]
-        nb_basename = os.path.basename(nb_filename)
-        rst_file = self.state_machine.document.attributes['source']
-        rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
-
-        # Move files around.
-        rel_dir = os.path.relpath(rst_dir, setup.confdir)
-        dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
-        dest_path = os.path.join(dest_dir, nb_basename)
-
-        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
-        # Ensure desination build directory exists
-        thread_safe_mkdir(os.path.dirname(dest_path))
-
-        # Copy unevaluated notebook
-        shutil.copyfile(nb_abs_path, dest_path)
-
-        # Construct paths to versions getting copied over
-        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
-        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
-        rel_path_eval = string.replace(nb_basename, '.ipynb', '_evaluated.ipynb')
-        rel_path_script = string.replace(nb_basename, '.ipynb', '.py')
-
-        # Create python script vesion
-        script_text = nb_to_python(nb_abs_path)
-        f = open(dest_path_script, 'w')
-        f.write(script_text.encode('utf8'))
-        f.close()
-
-        skip_exceptions = 'skip_exceptions' in self.options
-
-        ret = evaluate_notebook(
-            nb_abs_path, dest_path_eval, skip_exceptions=skip_exceptions)
-
-        try:
-            evaluated_text, resources = ret
-            evaluated_text = write_notebook_output(
-                resources, image_dir, image_rel_dir, evaluated_text)
-        except ValueError:
-            # This happens when a notebook raises an unhandled exception
-            evaluated_text = ret
-
-        # Create link to notebook and script files
-        link_rst = "(" + \
-                   formatted_link(nb_basename) + "; " + \
-                   formatted_link(rel_path_eval) + "; " + \
-                   formatted_link(rel_path_script) + \
-                   ")"
-
-        self.state_machine.insert_input([link_rst], rst_file)
-
-        # create notebook node
-        attributes = {'format': 'html', 'source': 'nb_path'}
-        nb_node = notebook_node('', evaluated_text, **attributes)
-        (nb_node.source, nb_node.line) = \
-            self.state_machine.get_source_and_line(self.lineno)
-
-        # add dependency
-        self.state.document.settings.record_dependencies.add(nb_abs_path)
-
-        # clean up
-        os.chdir(cwd)
-        shutil.rmtree(tmpdir, True)
-
-        return [nb_node]
-
-
-class notebook_node(nodes.raw):
-    pass
-
-def nb_to_python(nb_path):
-    """convert notebook to python script"""
-    exporter = python.PythonExporter()
-    output, resources = exporter.from_filename(nb_path)
-    return output
-
-def nb_to_html(nb_path):
-    """convert notebook to html"""
-    c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
-
-    exporter = html.HTMLExporter(template_file='full', config=c)
-    notebook = nbformat.read(open(nb_path), 'json')
-    output, resources = exporter.from_notebook_node(notebook)
-    header = output.split('<head>', 1)[1].split('</head>',1)[0]
-    body = output.split('<body>', 1)[1].split('</body>',1)[0]
-
-    # http://imgur.com/eR9bMRH
-    header = header.replace('<style', '<style scoped="scoped"')
-    header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n',
-                            '')
-    header = header.replace("code,pre{", "code{")
-
-    # Filter out styles that conflict with the sphinx theme.
-    filter_strings = [
-        'navbar',
-        'body{',
-        'alert{',
-        'uneditable-input{',
-        'collapse{',
-    ]
-
-    filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
-
-    line_begin = [
-        'pre{',
-        'p{margin'
-    ]
-
-    filterfunc = lambda x: not any([s in x for s in filter_strings])
-    header_lines = filter(filterfunc, header.split('\n'))
-
-    filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
-    header_lines = filter(filterfunc, header_lines)
-
-    header = '\n'.join(header_lines)
-
-    # concatenate raw html lines
-    lines = ['<div class="ipynotebook">']
-    lines.append(header)
-    lines.append(body)
-    lines.append('</div>')
-    return '\n'.join(lines), resources
-
-def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
-    # Create evaluated version and save it to the dest path.
-    notebook = nbformat.read(open(nb_path), 'json')
-    nb_runner = NotebookRunner(notebook, pylab=False)
-    try:
-        nb_runner.run_notebook(skip_exceptions=skip_exceptions)
-    except NotebookError as e:
-        print('')
-        print(e)
-        # Return the traceback, filtering out ANSI color codes.
-        # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
-        return "Notebook conversion failed with the " \
-               "following traceback: \n%s" % \
-            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '',
-                   str(e))
-
-    if dest_path is None:
-        dest_path = 'temp_evaluated.ipynb'
-    nbformat.write(nb_runner.nb, open(dest_path, 'w'), 'json')
-    ret = nb_to_html(dest_path)
-    if dest_path is 'temp_evaluated.ipynb':
-        os.remove(dest_path)
-    return ret
-
-def formatted_link(path):
-    return "`%s <%s>`__" % (os.path.basename(path), path)
-
-def visit_notebook_node(self, node):
-    self.visit_raw(node)
-
-def depart_notebook_node(self, node):
-    self.depart_raw(node)
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_node(notebook_node,
-                 html=(visit_notebook_node, depart_notebook_node))
-
-    app.add_directive('notebook', NotebookDirective)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-def make_image_dir(setup, rst_dir):
-    image_dir = setup.app.builder.outdir + os.path.sep + '_images'
-    rel_dir = os.path.relpath(setup.confdir, rst_dir)
-    image_rel_dir = rel_dir + os.path.sep + '_images'
-    thread_safe_mkdir(image_dir)
-    return image_dir, image_rel_dir
-
-def write_notebook_output(resources, image_dir, image_rel_dir, evaluated_text):
-    my_uuid = uuid.uuid4().hex
-
-    for output in resources['outputs']:
-        new_name = image_dir + os.path.sep + my_uuid + output
-        new_relative_name = image_rel_dir + os.path.sep + my_uuid + output
-        evaluated_text = evaluated_text.replace(output, new_relative_name)
-        with open(new_name, 'wb') as f:
-            f.write(resources['outputs'][output])
-    return evaluated_text
-
-def thread_safe_mkdir(dirname):
-    try:
-        os.makedirs(dirname)
-    except OSError as e:
-        if e.errno != errno.EEXIST:
-            raise
-        pass

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import shutil
-import io
-import tempfile
-from sphinx.util.compat import Directive
-from docutils.parsers.rst import directives
-from IPython.nbformat import current
-from notebook_sphinxext import \
-    notebook_node, visit_notebook_node, depart_notebook_node, \
-    evaluate_notebook, make_image_dir, write_notebook_output
-
-
-class NotebookCellDirective(Directive):
-    """Insert an evaluated notebook cell into a document
-
-    This uses runipy and nbconvert to transform an inline python
-    script into html suitable for embedding in a Sphinx document.
-    """
-    required_arguments = 0
-    optional_arguments = 1
-    has_content = True
-    option_spec = {'skip_exceptions': directives.flag}
-
-    def run(self):
-        # check if raw html is supported
-        if not self.state.document.settings.raw_enabled:
-            raise self.warning('"%s" directive disabled.' % self.name)
-
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        os.chdir(tmpdir)
-
-        rst_file = self.state_machine.document.attributes['source']
-        rst_dir = os.path.abspath(os.path.dirname(rst_file))
-
-        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
-        # Construct notebook from cell content
-        content = "\n".join(self.content)
-        with open("temp.py", "w") as f:
-            f.write(content)
-
-        convert_to_ipynb('temp.py', 'temp.ipynb')
-
-        skip_exceptions = 'skip_exceptions' in self.options
-
-        evaluated_text, resources = evaluate_notebook(
-            'temp.ipynb', skip_exceptions=skip_exceptions)
-
-        evaluated_text = write_notebook_output(
-            resources, image_dir, image_rel_dir, evaluated_text)
-
-        # create notebook node
-        attributes = {'format': 'html', 'source': 'nb_path'}
-        nb_node = notebook_node('', evaluated_text, **attributes)
-        (nb_node.source, nb_node.line) = \
-            self.state_machine.get_source_and_line(self.lineno)
-
-        # clean up
-        os.chdir(cwd)
-        shutil.rmtree(tmpdir, True)
-
-        return [nb_node]
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_node(notebook_node,
-                 html=(visit_notebook_node, depart_notebook_node))
-
-    app.add_directive('notebook-cell', NotebookCellDirective)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-def convert_to_ipynb(py_file, ipynb_file):
-    with io.open(py_file, 'r', encoding='utf-8') as f:
-        notebook = current.reads(f.read(), format='py')
-    with io.open(ipynb_file, 'w', encoding='utf-8') as f:
-        current.write(notebook, f, format='ipynb')

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 doc/extensions/numpydocmod/__init__.py
--- a/doc/extensions/numpydocmod/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-from numpydoc import setup

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 doc/extensions/numpydocmod/comment_eater.py
--- a/doc/extensions/numpydocmod/comment_eater.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from cStringIO import StringIO
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from compiler_unparse import unparse
-
-
-class Comment(object):
-    """ A comment block.
-    """
-    is_comment = True
-    def __init__(self, start_lineno, end_lineno, text):
-        # int : The first line number in the block. 1-indexed.
-        self.start_lineno = start_lineno
-        # int : The last line number. Inclusive!
-        self.end_lineno = end_lineno
-        # str : The text block including '#' character but not any leading spaces.
-        self.text = text
-
-    def add(self, string, start, end, line):
-        """ Add a new comment line.
-        """
-        self.start_lineno = min(self.start_lineno, start[0])
-        self.end_lineno = max(self.end_lineno, end[0])
-        self.text += string
-
-    def __repr__(self):
-        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno, self.text)
-
-
-class NonComment(object):
-    """ A non-comment block of code.
-    """
-    is_comment = False
-    def __init__(self, start_lineno, end_lineno):
-        self.start_lineno = start_lineno
-        self.end_lineno = end_lineno
-
-    def add(self, string, start, end, line):
-        """ Add lines to the block.
-        """
-        if string.strip():
-            # Only add if not entirely whitespace.
-            self.start_lineno = min(self.start_lineno, start[0])
-            self.end_lineno = max(self.end_lineno, end[0])
-
-    def __repr__(self):
-        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno)
-
-
-class CommentBlocker(object):
-    """ Pull out contiguous comment blocks.
-    """
-    def __init__(self):
-        # Start with a dummy.
-        self.current_block = NonComment(0, 0)
-
-        # All of the blocks seen so far.
-        self.blocks = []
-
-        # The index mapping lines of code to their associated comment blocks.
-        self.index = {}
-
-    def process_file(self, file):
-        """ Process a file object.
-        """
-        for token in tokenize.generate_tokens(file.next):
-            self.process_token(*token)
-        self.make_index()
-
-    def process_token(self, kind, string, start, end, line):
-        """ Process a single token.
-        """
-        if self.current_block.is_comment:
-            if kind == tokenize.COMMENT:
-                self.current_block.add(string, start, end, line)
-            else:
-                self.new_noncomment(start[0], end[0])
-        else:
-            if kind == tokenize.COMMENT:
-                self.new_comment(string, start, end, line)
-            else:
-                self.current_block.add(string, start, end, line)
-
-    def new_noncomment(self, start_lineno, end_lineno):
-        """ We are transitioning from a noncomment to a comment.
-        """
-        block = NonComment(start_lineno, end_lineno)
-        self.blocks.append(block)
-        self.current_block = block
-
-    def new_comment(self, string, start, end, line):
-        """ Possibly add a new comment.
-        
-        Only adds a new comment if this comment is the only thing on the line.
-        Otherwise, it extends the noncomment block.
-        """
-        prefix = line[:start[1]]
-        if prefix.strip():
-            # Oops! Trailing comment, not a comment block.
-            self.current_block.add(string, start, end, line)
-        else:
-            # A comment block.
-            block = Comment(start[0], end[0], string)
-            self.blocks.append(block)
-            self.current_block = block
-
-    def make_index(self):
-        """ Make the index mapping lines of actual code to their associated
-        prefix comments.
-        """
-        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
-            if not block.is_comment:
-                self.index[block.start_lineno] = prev
-
-    def search_for_comment(self, lineno, default=None):
-        """ Find the comment block just before the given line number.
-
-        Returns None (or the specified default) if there is no such block.
-        """
-        if not self.index:
-            self.make_index()
-        block = self.index.get(lineno, None)
-        text = getattr(block, 'text', default)
-        return text
-
-
-def strip_comment_marker(text):
-    """ Strip # markers at the front of a block of comment text.
-    """
-    lines = []
-    for line in text.splitlines():
-        lines.append(line.lstrip('#'))
-    text = textwrap.dedent('\n'.join(lines))
-    return text
-
-
-def get_class_traits(klass):
-    """ Yield all of the documentation for trait definitions on a class object.
-    """
-    # FIXME: gracefully handle errors here or in the caller?
-    source = inspect.getsource(klass)
-    cb = CommentBlocker()
-    cb.process_file(StringIO(source))
-    mod_ast = compiler.parse(source)
-    class_ast = mod_ast.node.nodes[0]
-    for node in class_ast.code.nodes:
-        # FIXME: handle other kinds of assignments?
-        if isinstance(node, compiler.ast.Assign):
-            name = node.nodes[0].name
-            rhs = unparse(node.expr).strip()
-            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
-            yield name, rhs, doc
-

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 doc/extensions/numpydocmod/compiler_unparse.py
--- a/doc/extensions/numpydocmod/compiler_unparse.py
+++ /dev/null
@@ -1,860 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
-    The unparse method takes a compiler.ast tree and transforms it back into
-    valid python code.  It is incomplete and currently only works for
-    import statements, function calls, function definitions, assignments, and
-    basic expressions.
-
-    Inspired by python-2.5-svn/Demo/parser/unparse.py
-
-    fixme: We may want to move to using _ast trees because the compiler for
-           them is about 6 times faster than compiler.compile.
-"""
-
-import sys
-import cStringIO
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-def unparse(ast, single_line_functions=False):
-    s = cStringIO.StringIO()
-    UnparseCompilerAst(ast, s, single_line_functions)
-    return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
-                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
-    """ Methods in this class recursively traverse an AST and
-        output source code for the abstract syntax; original formatting
-        is disregarged.
-    """
-
-    #########################################################################
-    # object interface.
-    #########################################################################
-
-    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
-        """ Unparser(tree, file=sys.stdout) -> None.
-
-            Print the source for tree to file.
-        """
-        self.f = file
-        self._single_func = single_line_functions
-        self._do_indent = True
-        self._indent = 0
-        self._dispatch(tree)
-        self._write("\n")
-        self.f.flush()
-
-    #########################################################################
-    # Unparser private interface.
-    #########################################################################
-
-    ### format, output, and dispatch methods ################################
-
-    def _fill(self, text = ""):
-        "Indent a piece of text, according to the current indentation level"
-        if self._do_indent:
-            self._write("\n"+"    "*self._indent + text)
-        else:
-            self._write(text)
-
-    def _write(self, text):
-        "Append a piece of text to the current line."
-        self.f.write(text)
-
-    def _enter(self):
-        "Print ':', and increase the indentation."
-        self._write(": ")
-        self._indent += 1
-
-    def _leave(self):
-        "Decrease the indentation level."
-        self._indent -= 1
-
-    def _dispatch(self, tree):
-        "_dispatcher function, _dispatching tree type T to method _T."
-        if isinstance(tree, list):
-            for t in tree:
-                self._dispatch(t)
-            return
-        meth = getattr(self, "_"+tree.__class__.__name__)
-        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
-            return
-        meth(tree)
-
-
-    #########################################################################
-    # compiler.ast unparsing methods.
-    #
-    # There should be one method per concrete grammar type. They are
-    # organized in alphabetical order.
-    #########################################################################
-
-    def _Add(self, t):
-        self.__binary_op(t, '+')
-
-    def _And(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") and (")
-        self._write(")")
-               
-    def _AssAttr(self, t):
-        """ Handle assigning an attribute of an object
-        """
-        self._dispatch(t.expr)
-        self._write('.'+t.attrname)
- 
-    def _Assign(self, t):
-        """ Expression Assignment such as "a = 1".
-
-            This only handles assignment in expressions.  Keyword assignment
-            is handled separately.
-        """
-        self._fill()
-        for target in t.nodes:
-            self._dispatch(target)
-            self._write(" = ")
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write('; ')
-
-    def _AssName(self, t):
-        """ Name on left hand side of expression.
-
-            Treat just like a name on the right side of an expression.
-        """
-        self._Name(t)
-
-    def _AssTuple(self, t):
-        """ Tuple on left hand side of an expression.
-        """
-
-        # _write each elements, separated by a comma.
-        for element in t.nodes[:-1]:
-            self._dispatch(element)
-            self._write(", ")
-
-        # Handle the last one without writing comma
-        last_element = t.nodes[-1]
-        self._dispatch(last_element)
-
-    def _AugAssign(self, t):
-        """ +=,-=,*=,/=,**=, etc. operations
-        """
-        
-        self._fill()
-        self._dispatch(t.node)
-        self._write(' '+t.op+' ')
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write(';')
-            
-    def _Bitand(self, t):
-        """ Bit and operation.
-        """
-        
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" & ")
-                
-    def _Bitor(self, t):
-        """ Bit or operation
-        """
-        
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" | ")
-                
-    def _CallFunc(self, t):
-        """ Function call.
-        """
-        self._dispatch(t.node)
-        self._write("(")
-        comma = False
-        for e in t.args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._dispatch(e)
-        if t.star_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("*")
-            self._dispatch(t.star_args)
-        if t.dstar_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("**")
-            self._dispatch(t.dstar_args)
-        self._write(")")
-
-    def _Compare(self, t):
-        self._dispatch(t.expr)
-        for op, expr in t.ops:
-            self._write(" " + op + " ")
-            self._dispatch(expr)
-
-    def _Const(self, t):
-        """ A constant value such as an integer value, 3, or a string, "hello".
-        """
-        self._dispatch(t.value)
-
-    def _Decorators(self, t):
-        """ Handle function decorators (eg. @has_units)
-        """
-        for node in t.nodes:
-            self._dispatch(node)
-
-    def _Dict(self, t):
-        self._write("{")
-        for  i, (k, v) in enumerate(t.items):
-            self._dispatch(k)
-            self._write(": ")
-            self._dispatch(v)
-            if i < len(t.items)-1:
-                self._write(", ")
-        self._write("}")
-
-    def _Discard(self, t):
-        """ Node for when return value is ignored such as in "foo(a)".
-        """
-        self._fill()
-        self._dispatch(t.expr)
-
-    def _Div(self, t):
-        self.__binary_op(t, '/')
-
-    def _Ellipsis(self, t):
-        self._write("...")
-
-    def _From(self, t):
-        """ Handle "from xyz import foo, bar as baz".
-        """
-        # fixme: Are From and ImportFrom handled differently?
-        self._fill("from ")
-        self._write(t.modname)
-        self._write(" import ")
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-                
-    def _Function(self, t):
-        """ Handle function definitions
-        """
-        if t.decorators is not None:
-            self._fill("@")
-            self._dispatch(t.decorators)
-        self._fill("def "+t.name + "(")
-        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
-        for i, arg in enumerate(zip(t.argnames, defaults)):
-            self._write(arg[0])
-            if arg[1] is not None:
-                self._write('=')
-                self._dispatch(arg[1])
-            if i < len(t.argnames)-1:
-                self._write(', ')
-        self._write(")")
-        if self._single_func:
-            self._do_indent = False
-        self._enter()
-        self._dispatch(t.code)
-        self._leave()
-        self._do_indent = True
-
-    def _Getattr(self, t):
-        """ Handle getting an attribute of an object
-        """
-        if isinstance(t.expr, (Div, Mul, Sub, Add)):
-            self._write('(')
-            self._dispatch(t.expr)
-            self._write(')')
-        else:
-            self._dispatch(t.expr)
-            
-        self._write('.'+t.attrname)
-        
-    def _If(self, t):
-        self._fill()
-        
-        for i, (compare,code) in enumerate(t.tests):
-            if i == 0:
-                self._write("if ")
-            else:
-                self._write("elif ")
-            self._dispatch(compare)
-            self._enter()
-            self._fill()
-            self._dispatch(code)
-            self._leave()
-            self._write("\n")
-
-        if t.else_ is not None:
-            self._write("else")
-            self._enter()
-            self._fill()
-            self._dispatch(t.else_)
-            self._leave()
-            self._write("\n")
-            
-    def _IfExp(self, t):
-        self._dispatch(t.then)
-        self._write(" if ")
-        self._dispatch(t.test)
-
-        if t.else_ is not None:
-            self._write(" else (")
-            self._dispatch(t.else_)
-            self._write(")")
-
-    def _Import(self, t):
-        """ Handle "import xyz.foo".
-        """
-        self._fill("import ")
-        
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-
-    def _Keyword(self, t):
-        """ Keyword value assignment within function calls and definitions.
-        """
-        self._write(t.name)
-        self._write("=")
-        self._dispatch(t.expr)
-        
-    def _List(self, t):
-        self._write("[")
-        for  i,node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i < len(t.nodes)-1:
-                self._write(", ")
-        self._write("]")
-
-    def _Module(self, t):
-        if t.doc is not None:
-            self._dispatch(t.doc)
-        self._dispatch(t.node)
-
-    def _Mul(self, t):
-        self.__binary_op(t, '*')
-
-    def _Name(self, t):
-        self._write(t.name)
-
-    def _NoneType(self, t):
-        self._write("None")
-        
-    def _Not(self, t):
-        self._write('not (')
-        self._dispatch(t.expr)
-        self._write(')')
-        
-    def _Or(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") or (")
-        self._write(")")
-                
-    def _Pass(self, t):
-        self._write("pass\n")
-
-    def _Printnl(self, t):
-        self._fill("print ")
-        if t.dest:
-            self._write(">> ")
-            self._dispatch(t.dest)
-            self._write(", ")
-        comma = False
-        for node in t.nodes:
-            if comma: self._write(', ')
-            else: comma = True
-            self._dispatch(node)
-
-    def _Power(self, t):
-        self.__binary_op(t, '**')
-
-    def _Return(self, t):
-        self._fill("return ")
-        if t.value:
-            if isinstance(t.value, Tuple):
-                text = ', '.join([ name.name for name in t.value.asList() ])
-                self._write(text)
-            else:
-                self._dispatch(t.value)
-            if not self._do_indent:
-                self._write('; ')
-
-    def _Slice(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        if t.lower:
-            self._dispatch(t.lower)
-        self._write(":")
-        if t.upper:
-            self._dispatch(t.upper)
-        #if t.step:
-        #    self._write(":")
-        #    self._dispatch(t.step)
-        self._write("]")
-
-    def _Sliceobj(self, t):
-        for i, node in enumerate(t.nodes):
-            if i != 0:
-                self._write(":")
-            if not (isinstance(node, Const) and node.value is None):
-                self._dispatch(node)
-
-    def _Stmt(self, tree):
-        for node in tree.nodes:
-            self._dispatch(node)
-
-    def _Sub(self, t):
-        self.__binary_op(t, '-')
-
-    def _Subscript(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        for i, value in enumerate(t.subs):
-            if i != 0:
-                self._write(",")
-            self._dispatch(value)
-        self._write("]")
-
-    def _TryExcept(self, t):
-        self._fill("try")
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-
-        for handler in t.handlers:
-            self._fill('except ')
-            self._dispatch(handler[0])
-            if handler[1] is not None:
-                self._write(', ')
-                self._dispatch(handler[1])
-            self._enter()
-            self._dispatch(handler[2])
-            self._leave()
-            
-        if t.else_:
-            self._fill("else")
-            self._enter()
-            self._dispatch(t.else_)
-            self._leave()
-
-    def _Tuple(self, t):
-
-        if not t.nodes:
-            # Empty tuple.
-            self._write("()")
-        else:
-            self._write("(")
-
-            # _write each elements, separated by a comma.
-            for element in t.nodes[:-1]:
-                self._dispatch(element)
-                self._write(", ")
-
-            # Handle the last one without writing comma
-            last_element = t.nodes[-1]
-            self._dispatch(last_element)
-
-            self._write(")")
-            
-    def _UnaryAdd(self, t):
-        self._write("+")
-        self._dispatch(t.expr)
-        
-    def _UnarySub(self, t):
-        self._write("-")
-        self._dispatch(t.expr)        
-
-    def _With(self, t):
-        self._fill('with ')
-        self._dispatch(t.expr)
-        if t.vars:
-            self._write(' as ')
-            self._dispatch(t.vars.name)
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-        self._write('\n')
-        
-    def _int(self, t):
-        self._write(repr(t))
-
-    def __binary_op(self, t, symbol):
-        # Check if parenthesis are needed on left side and then dispatch
-        has_paren = False
-        left_class = str(t.left.__class__)
-        if (left_class in op_precedence.keys() and
-            op_precedence[left_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.left)
-        if has_paren:
-            self._write(')')
-        # Write the appropriate symbol for operator
-        self._write(symbol)
-        # Check if parenthesis are needed on the right side and then dispatch
-        has_paren = False
-        right_class = str(t.right.__class__)
-        if (right_class in op_precedence.keys() and
-            op_precedence[right_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.right)
-        if has_paren:
-            self._write(')')
-
-    def _float(self, t):
-        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
-        # We prefer str here.
-        self._write(str(t))
-
-    def _str(self, t):
-        self._write(repr(t))
-        
-    def _tuple(self, t):
-        self._write(str(t))
-
-    #########################################################################
-    # These are the methods from the _ast modules unparse.
-    #
-    # As our needs to handle more advanced code increase, we may want to
-    # modify some of the methods below so that they work for compiler.ast.
-    #########################################################################
-
-#    # stmt
-#    def _Expr(self, tree):
-#        self._fill()
-#        self._dispatch(tree.value)
-#
-#    def _Import(self, t):
-#        self._fill("import ")
-#        first = True
-#        for a in t.names:
-#            if first:
-#                first = False
-#            else:
-#                self._write(", ")
-#            self._write(a.name)
-#            if a.asname:
-#                self._write(" as "+a.asname)
-#
-##    def _ImportFrom(self, t):
-##        self._fill("from ")
-##        self._write(t.module)
-##        self._write(" import ")
-##        for i, a in enumerate(t.names):
-##            if i == 0:
-##                self._write(", ")
-##            self._write(a.name)
-##            if a.asname:
-##                self._write(" as "+a.asname)
-##        # XXX(jpe) what is level for?
-##
-#
-#    def _Break(self, t):
-#        self._fill("break")
-#
-#    def _Continue(self, t):
-#        self._fill("continue")
-#
-#    def _Delete(self, t):
-#        self._fill("del ")
-#        self._dispatch(t.targets)
-#
-#    def _Assert(self, t):
-#        self._fill("assert ")
-#        self._dispatch(t.test)
-#        if t.msg:
-#            self._write(", ")
-#            self._dispatch(t.msg)
-#
-#    def _Exec(self, t):
-#        self._fill("exec ")
-#        self._dispatch(t.body)
-#        if t.globals:
-#            self._write(" in ")
-#            self._dispatch(t.globals)
-#        if t.locals:
-#            self._write(", ")
-#            self._dispatch(t.locals)
-#
-#    def _Print(self, t):
-#        self._fill("print ")
-#        do_comma = False
-#        if t.dest:
-#            self._write(">>")
-#            self._dispatch(t.dest)
-#            do_comma = True
-#        for e in t.values:
-#            if do_comma:self._write(", ")
-#            else:do_comma=True
-#            self._dispatch(e)
-#        if not t.nl:
-#            self._write(",")
-#
-#    def _Global(self, t):
-#        self._fill("global")
-#        for i, n in enumerate(t.names):
-#            if i != 0:
-#                self._write(",")
-#            self._write(" " + n)
-#
-#    def _Yield(self, t):
-#        self._fill("yield")
-#        if t.value:
-#            self._write(" (")
-#            self._dispatch(t.value)
-#            self._write(")")
-#
-#    def _Raise(self, t):
-#        self._fill('raise ')
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.inst:
-#            self._write(", ")
-#            self._dispatch(t.inst)
-#        if t.tback:
-#            self._write(", ")
-#            self._dispatch(t.tback)
-#
-#
-#    def _TryFinally(self, t):
-#        self._fill("try")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#        self._fill("finally")
-#        self._enter()
-#        self._dispatch(t.finalbody)
-#        self._leave()
-#
-#    def _excepthandler(self, t):
-#        self._fill("except ")
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.name:
-#            self._write(", ")
-#            self._dispatch(t.name)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _ClassDef(self, t):
-#        self._write("\n")
-#        self._fill("class "+t.name)
-#        if t.bases:
-#            self._write("(")
-#            for a in t.bases:
-#                self._dispatch(a)
-#                self._write(", ")
-#            self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _FunctionDef(self, t):
-#        self._write("\n")
-#        for deco in t.decorators:
-#            self._fill("@")
-#            self._dispatch(deco)
-#        self._fill("def "+t.name + "(")
-#        self._dispatch(t.args)
-#        self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _For(self, t):
-#        self._fill("for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave
-#
-#    def _While(self, t):
-#        self._fill("while ")
-#        self._dispatch(t.test)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave
-#
-#    # expr
-#    def _Str(self, tree):
-#        self._write(repr(tree.s))
-##
-#    def _Repr(self, t):
-#        self._write("`")
-#        self._dispatch(t.value)
-#        self._write("`")
-#
-#    def _Num(self, t):
-#        self._write(repr(t.n))
-#
-#    def _ListComp(self, t):
-#        self._write("[")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write("]")
-#
-#    def _GeneratorExp(self, t):
-#        self._write("(")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write(")")
-#
-#    def _comprehension(self, t):
-#        self._write(" for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        for if_clause in t.ifs:
-#            self._write(" if ")
-#            self._dispatch(if_clause)
-#
-#    def _IfExp(self, t):
-#        self._dispatch(t.body)
-#        self._write(" if ")
-#        self._dispatch(t.test)
-#        if t.orelse:
-#            self._write(" else ")
-#            self._dispatch(t.orelse)
-#
-#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-#    def _UnaryOp(self, t):
-#        self._write(self.unop[t.op.__class__.__name__])
-#        self._write("(")
-#        self._dispatch(t.operand)
-#        self._write(")")
-#
-#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-#                    "FloorDiv":"//", "Pow": "**"}
-#    def _BinOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.left)
-#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-#        self._dispatch(t.right)
-#        self._write(")")
-#
-#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
-#    def _BoolOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.values[0])
-#        for v in t.values[1:]:
-#            self._write(" %s " % self.boolops[t.op.__class__])
-#            self._dispatch(v)
-#        self._write(")")
-#
-#    def _Attribute(self,t):
-#        self._dispatch(t.value)
-#        self._write(".")
-#        self._write(t.attr)
-#
-##    def _Call(self, t):
-##        self._dispatch(t.func)
-##        self._write("(")
-##        comma = False
-##        for e in t.args:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        for e in t.keywords:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        if t.starargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("*")
-##            self._dispatch(t.starargs)
-##        if t.kwargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("**")
-##            self._dispatch(t.kwargs)
-##        self._write(")")
-#
-#    # slice
-#    def _Index(self, t):
-#        self._dispatch(t.value)
-#
-#    def _ExtSlice(self, t):
-#        for i, d in enumerate(t.dims):
-#            if i != 0:
-#                self._write(': ')
-#            self._dispatch(d)
-#
-#    # others
-#    def _arguments(self, t):
-#        first = True
-#        nonDef = len(t.args)-len(t.defaults)
-#        for a in t.args[0:nonDef]:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a)
-#        for a,d in zip(t.args[nonDef:], t.defaults):
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a),
-#            self._write("=")
-#            self._dispatch(d)
-#        if t.vararg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("*"+t.vararg)
-#        if t.kwarg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("**"+t.kwarg)
-#
-##    def _keyword(self, t):
-##        self._write(t.arg)
-##        self._write("=")
-##        self._dispatch(t.value)
-#
-#    def _Lambda(self, t):
-#        self._write("lambda ")
-#        self._dispatch(t.args)
-#        self._write(": ")
-#        self._dispatch(t.body)
-
-
-

diff -r 88d35715b7c871663a57994384acd76872b02b76 -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 doc/extensions/numpydocmod/docscrape.py
--- a/doc/extensions/numpydocmod/docscrape.py
+++ /dev/null
@@ -1,500 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-
-import inspect
-import textwrap
-import re
-import pydoc
-from StringIO import StringIO
-from warnings import warn
-
-class Reader(object):
-    """A line-based string reader.
-
-    """
-    def __init__(self, data):
-        """
-        Parameters
-        ----------
-        data : str
-           String with lines separated by '\n'.
-
-        """
-        if isinstance(data,list):
-            self._str = data
-        else:
-            self._str = data.split('\n') # store string as list of lines
-
-        self.reset()
-
-    def __getitem__(self, n):
-        return self._str[n]
-
-    def reset(self):
-        self._l = 0 # current line nr
-
-    def read(self):
-        if not self.eof():
-            out = self[self._l]
-            self._l += 1
-            return out
-        else:
-            return ''
-
-    def seek_next_non_empty_line(self):
-        for l in self[self._l:]:
-            if l.strip():
-                break
-            else:
-                self._l += 1
-
-    def eof(self):
-        return self._l >= len(self._str)
-
-    def read_to_condition(self, condition_func):
-        start = self._l
-        for line in self[start:]:
-            if condition_func(line):
-                return self[start:self._l]
-            self._l += 1
-            if self.eof():
-                return self[start:self._l+1]
-        return []
-
-    def read_to_next_empty_line(self):
-        self.seek_next_non_empty_line()
-        def is_empty(line):
-            return not line.strip()
-        return self.read_to_condition(is_empty)
-
-    def read_to_next_unindented_line(self):
-        def is_unindented(line):
-            return (line.strip() and (len(line.lstrip()) == len(line)))
-        return self.read_to_condition(is_unindented)
-
-    def peek(self,n=0):
-        if self._l + n < len(self._str):
-            return self[self._l + n]
-        else:
-            return ''
-
-    def is_empty(self):
-        return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
-    def __init__(self, docstring, config={}):
-        docstring = textwrap.dedent(docstring).split('\n')
-
-        self._doc = Reader(docstring)
-        self._parsed_data = {
-            'Signature': '',
-            'Summary': [''],
-            'Extended Summary': [],
-            'Parameters': [],
-            'Returns': [],
-            'Raises': [],
-            'Warns': [],
-            'Other Parameters': [],
-            'Attributes': [],
-            'Methods': [],
-            'See Also': [],
-            'Notes': [],
-            'Warnings': [],
-            'References': '',
-            'Examples': '',
-            'index': {}
-            }
-
-        self._parse()
-
-    def __getitem__(self,key):
-        return self._parsed_data[key]
-
-    def __setitem__(self,key,val):
-        if not self._parsed_data.has_key(key):
-            warn("Unknown section %s" % key)
-        else:
-            self._parsed_data[key] = val
-
-    def _is_at_section(self):
-        self._doc.seek_next_non_empty_line()
-
-        if self._doc.eof():
-            return False
-
-        l1 = self._doc.peek().strip()  # e.g. Parameters
-
-        if l1.startswith('.. index::'):
-            return True
-
-        l2 = self._doc.peek(1).strip() #    ---------- or ==========
-        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
-    def _strip(self,doc):
-        i = 0
-        j = 0
-        for i,line in enumerate(doc):
-            if line.strip(): break
-
-        for j,line in enumerate(doc[::-1]):
-            if line.strip(): break
-
-        return doc[i:len(doc)-j]
-
-    def _read_to_next_section(self):
-        section = self._doc.read_to_next_empty_line()
-
-        while not self._is_at_section() and not self._doc.eof():
-            if not self._doc.peek(-1).strip(): # previous line was empty
-                section += ['']
-
-            section += self._doc.read_to_next_empty_line()
-
-        return section
-
-    def _read_sections(self):
-        while not self._doc.eof():
-            data = self._read_to_next_section()
-            name = data[0].strip()
-
-            if name.startswith('..'): # index section
-                yield name, data[1:]
-            elif len(data) < 2:
-                yield StopIteration
-            else:
-                yield name, self._strip(data[2:])
-
-    def _parse_param_list(self,content):
-        r = Reader(content)
-        params = []
-        while not r.eof():
-            header = r.read().strip()
-            if ' : ' in header:
-                arg_name, arg_type = header.split(' : ')[:2]
-            else:
-                arg_name, arg_type = header, ''
-
-            desc = r.read_to_next_unindented_line()
-            desc = dedent_lines(desc)
-
-            params.append((arg_name,arg_type,desc))
-
-        return params
-
-
-    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
-                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-    def _parse_see_also(self, content):
-        """
-        func_name : Descriptive text
-            continued text
-        another_func_name : Descriptive text
-        func_name1, func_name2, :meth:`func_name`, func_name3
-
-        """
-        items = []
-
-        def parse_item_name(text):
-            """Match ':role:`name`' or 'name'"""
-            m = self._name_rgx.match(text)
-            if m:
-                g = m.groups()
-                if g[1] is None:
-                    return g[3], None
-                else:
-                    return g[2], g[1]
-            raise ValueError("%s is not a item name" % text)
-
-        def push_item(name, rest):
-            if not name:
-                return
-            name, role = parse_item_name(name)
-            items.append((name, list(rest), role))
-            del rest[:]
-
-        current_func = None
-        rest = []
-
-        for line in content:
-            if not line.strip(): continue
-
-            m = self._name_rgx.match(line)
-            if m and line[m.end():].strip().startswith(':'):
-                push_item(current_func, rest)
-                current_func, line = line[:m.end()], line[m.end():]
-                rest = [line.split(':', 1)[1].strip()]
-                if not rest[0]:
-                    rest = []
-            elif not line.startswith(' '):
-                push_item(current_func, rest)
-                current_func = None
-                if ',' in line:
-                    for func in line.split(','):
-                        if func.strip():
-                            push_item(func, [])
-                elif line.strip():
-                    current_func = line
-            elif current_func is not None:
-                rest.append(line.strip())
-        push_item(current_func, rest)
-        return items
-
-    def _parse_index(self, section, content):
-        """
-        .. index: default
-           :refguide: something, else, and more
-
-        """
-        def strip_each_in(lst):
-            return [s.strip() for s in lst]
-
-        out = {}
-        section = section.split('::')
-        if len(section) > 1:
-            out['default'] = strip_each_in(section[1].split(','))[0]
-        for line in content:
-            line = line.split(':')
-            if len(line) > 2:
-                out[line[1]] = strip_each_in(line[2].split(','))
-        return out
-
-    def _parse_summary(self):
-        """Grab signature (if given) and summary"""
-        if self._is_at_section():
-            return
-
-        summary = self._doc.read_to_next_empty_line()
-        summary_str = " ".join([s.strip() for s in summary]).strip()
-        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
-            self['Signature'] = summary_str
-            if not self._is_at_section():
-                self['Summary'] = self._doc.read_to_next_empty_line()
-        else:
-            self['Summary'] = summary
-
-        if not self._is_at_section():
-            self['Extended Summary'] = self._read_to_next_section()
-
-    def _parse(self):
-        self._doc.reset()
-        self._parse_summary()
-
-        for (section,content) in self._read_sections():
-            if not section.startswith('..'):
-                section = ' '.join([s.capitalize() for s in section.split(' ')])
-            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
-                           'Other Parameters', 'Attributes', 'Methods'):
-                self[section] = self._parse_param_list(content)
-            elif section.startswith('.. index::'):
-                self['index'] = self._parse_index(section, content)
-            elif section == 'See Also':
-                self['See Also'] = self._parse_see_also(content)
-            else:
-                self[section] = content
-
-    # string conversion routines
-
-    def _str_header(self, name, symbol='-'):
-        return [name, len(name)*symbol]
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        if self['Signature']:
-            return [self['Signature'].replace('*','\*')] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        if self['Summary']:
-            return self['Summary'] + ['']
-        else:
-            return []
-
-    def _str_extended_summary(self):
-        if self['Extended Summary']:
-            return self['Extended Summary'] + ['']
-        else:
-            return []
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            for param,param_type,desc in self[name]:
-                out += ['%s : %s' % (param, param_type)]
-                out += self._str_indent(desc)
-            out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += self[name]
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        if not self['See Also']: return []
-        out = []
-        out += self._str_header("See Also")
-        last_had_desc = True
-        for func, desc, role in self['See Also']:
-            if role:
-                link = ':%s:`%s`' % (role, func)
-            elif func_role:
-                link = ':%s:`%s`' % (func_role, func)
-            else:
-                link = "`%s`_" % func
-            if desc or last_had_desc:
-                out += ['']
-                out += [link]
-            else:
-                out[-1] += ", %s" % link
-            if desc:
-                out += self._str_indent([' '.join(desc)])
-                last_had_desc = True
-            else:
-                last_had_desc = False
-        out += ['']
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.iteritems():
-            if section == 'default':
-                continue
-            out += ['   :%s: %s' % (section, ', '.join(references))]
-        return out
-
-    def __str__(self, func_role=''):
-        out = []
-        out += self._str_signature()
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Returns', 'Other Parameters',
-                           'Raises', 'Warns'):
-            out += self._str_param_list(param_list)
-        out += self._str_section('Warnings')
-        out += self._str_see_also(func_role)
-        for s in ('Notes','References','Examples'):
-            out += self._str_section(s)
-        for param_list in ('Attributes', 'Methods'):
-            out += self._str_param_list(param_list)
-        out += self._str_index()
-        return '\n'.join(out)
-
-
-def indent(str,indent=4):
-    indent_str = ' '*indent
-    if str is None:
-        return indent_str
-    lines = str.split('\n')
-    return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
-    """Deindent a list of lines maximally"""
-    return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
-    return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
-    def __init__(self, func, role='func', doc=None, config={}):
-        self._f = func
-        self._role = role # e.g. "func" or "meth"
-
-        if doc is None:
-            if func is None:
-                raise ValueError("No function or docstring given")
-            doc = inspect.getdoc(func) or ''
-        NumpyDocString.__init__(self, doc)
-
-        if not self['Signature'] and func is not None:
-            func, func_name = self.get_func()
-            try:
-                # try to read signature
-                argspec = inspect.getargspec(func)
-                argspec = inspect.formatargspec(*argspec)
-                argspec = argspec.replace('*','\*')
-                signature = '%s%s' % (func_name, argspec)
-            except TypeError, e:
-                signature = '%s()' % func_name
-            self['Signature'] = signature
-
-    def get_func(self):
-        func_name = getattr(self._f, '__name__', self.__class__.__name__)
-        if inspect.isclass(self._f):
-            func = getattr(self._f, '__call__', self._f.__init__)
-        else:
-            func = self._f
-        return func, func_name
-
-    def __str__(self):
-        out = ''
-
-        func, func_name = self.get_func()
-        signature = self['Signature'].replace('*', '\*')
-
-        roles = {'func': 'function',
-                 'meth': 'method'}
-
-        if self._role:
-            if not roles.has_key(self._role):
-                print("Warning: invalid role %s" % self._role)
-            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
-                                             func_name)
-
-        out += super(FunctionDoc, self).__str__(func_role=self._role)
-        return out
-
-
-class ClassDoc(NumpyDocString):
-    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
-                 config={}):
-        if not inspect.isclass(cls) and cls is not None:
-            raise ValueError("Expected a class or None, but got %r" % cls)
-        self._cls = cls
-
-        if modulename and not modulename.endswith('.'):
-            modulename += '.'
-        self._mod = modulename
-
-        if doc is None:
-            if cls is None:
-                raise ValueError("No class or documentation string given")
-            doc = pydoc.getdoc(cls)
-
-        NumpyDocString.__init__(self, doc)
-
-        if config.get('show_class_members', True):
-            if not self['Methods']:
-                self['Methods'] = [(name, '', '')
-                                   for name in sorted(self.methods)]
-            if not self['Attributes']:
-                self['Attributes'] = [(name, '', '')
-                                      for name in sorted(self.properties)]
-
-    @property
-    def methods(self):
-        if self._cls is None:
-            return []
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and callable(func)]
-
-    @property
-    def properties(self):
-        if self._cls is None:
-            return []
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and func is None]

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/109872b62c38/
Changeset:   109872b62c38
Branch:      yt
User:        jzuhone
Date:        2016-03-04 15:33:24+00:00
Summary:     Merge
Affected #:  87 files

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -94,16 +94,16 @@
 
 There is a third, borderline class of field in yt, as well.  This is the
 "alias" type, where a field on disk (for example, (frontend, ``Density``)) is 
-aliased into an internal yt-name (for example, (``gas``, ``density``)).  The 
+aliased into an internal yt-name (for example, (``gas``, ``density``)). The 
 aliasing process allows universally-defined derived fields to take advantage of 
 internal names, and it also provides an easy way to address what units something 
 should be returned in.  If an aliased field is requested (and aliased fields 
 will always be lowercase, with underscores separating words) it will be returned 
-in CGS units (future versions will enable global defaults to be set for MKS and 
-other unit systems), whereas if the frontend-specific field is requested, it 
-will not undergo any unit conversions from its natural units.  (This rule is 
-occasionally violated for fields which are mesh-dependent, specifically particle 
-masses in some cosmology codes.)
+in the units specified by the unit system of the database (see :ref:`unit_systems`
+for a guide to using the different unit systems in yt), whereas if the 
+frontend-specific field is requested, it will not undergo any unit conversions 
+from its natural units.  (This rule is occasionally violated for fields which 
+are mesh-dependent, specifically particle masses in some cosmology codes.)
 
 .. _known-field-types:
 
@@ -125,7 +125,8 @@
 * ``gas`` -- This is the usual default for simulation frontends for fluid
   types.  These fields are typically aliased to the frontend-specific mesh
   fields for grid-based codes or to the deposit fields for particle-based
-  codes.  Default units are in CGS.
+  codes.  Default units are in the unit system of the dataset (see 
+  :ref:`unit_systems` for more information).
 * particle type -- These are particle fields that exist on-disk as written 
   by individual frontends.  If the frontend designates names for these particles
   (i.e. particle type) those names are the field types. 
@@ -240,6 +241,37 @@
    print(ds.field_info["gas", "pressure"].get_units())
    print(ds.field_info["gas", "pressure"].get_source())
 
+.. _bfields:
+
+Magnetic Fields
+---------------
+
+Magnetic fields require special handling, because their dimensions are different in
+different systems of units, in particular between the CGS and MKS (SI) systems of units.
+Superficially, it would appear that they are in the same dimensions, since the units 
+of the magnetic field in the CGS and MKS system are gauss (:math:`\rm{G}`) and tesla 
+(:math:`\rm{T}`), respectively, and numerically :math:`1~\rm{G} = 10^{-4}~\rm{T}`. However, 
+if we examine the base units, we find that they do indeed have different dimensions:
+
+.. math::
+
+    \rm{1~G = 1~\frac{\sqrt{g}}{\sqrt{cm}\cdot{s}}} \\
+    \rm{1~T = 1~\frac{kg}{A\cdot{s^2}}}
+
+It is easier to see the difference between the dimensionality of the magnetic field in the two
+systems in terms of the definition of the magnetic pressure:
+
+.. math::
+
+    p_B = \frac{B^2}{8\pi}~\rm{(cgs)} \\
+    p_B = \frac{B^2}{2\mu_0}~\rm{(MKS)}
+
+where :math:`\mu_0 = 4\pi \times 10^{-7}~\rm{N/A^2}` is the vacuum permeability. yt automatically
+detects on a per-frontend basis what units the magnetic should be in, and allows conversion between 
+different magnetic field units in the different :ref:`unit systems <unit_systems>` as well. To 
+determine how to set up special magnetic field handling when designing a new frontend, check out 
+:ref:`bfields-frontend`.
+
 Particle Fields
 ---------------
 

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -24,9 +24,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -41,9 +39,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (maxval)"
@@ -52,9 +48,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dens)"
@@ -63,9 +57,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "mass = dd['cell_mass']\n",
@@ -79,9 +71,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "dx = dd['dx']\n",
@@ -107,9 +97,11 @@
     "* `in_units`\n",
     "* `in_cgs`\n",
     "* `in_mks`\n",
+    "* `in_base`\n",
     "* `convert_to_units`\n",
     "* `convert_to_cgs`\n",
-    "* `convert_to_mks`"
+    "* `convert_to_mks`\n",
+    "* `convert_to_base`"
    ]
   },
   {
@@ -122,9 +114,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['density'].in_units('Msun/pc**3'))"
@@ -134,35 +124,73 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
+    "`in_cgs` and `in_mks` return a copy of the array converted to CGS and MKS units, respectively:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['pressure'])\n",
-    "print ((dd['pressure']).in_cgs())\n",
-    "print ((dd['pressure']).in_mks())"
+    "print (dd['pressure'].in_cgs())\n",
+    "print (dd['pressure'].in_mks())"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The next two methods do in-place conversions:"
+    "`in_cgs` and `in_mks` are just special cases of the more general `in_base`, which can convert a `YTArray` to a number of different unit systems:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print (dd['pressure'].in_base('imperial')) # Imperial/English base units\n",
+    "print (dd['pressure'].in_base('galactic')) # Base units of kpc, Msun, Myr\n",
+    "print (dd['pressure'].in_base('planck')) # Base units in the Planck system\n",
+    "print (dd['pressure'].in_base()) # defaults to cgs if no argument given"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`in_base` can even take a dataset as the argument to convert the `YTArray` into the base units of the dataset:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print (dd['pressure'].in_base(ds)) # The IsolatedGalaxy dataset from above"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "yt defines a number of unit systems, and new unit systems may be added by the user, which can also be passed to `in_base`. To learn more about the unit systems, how to use them with datasets and other objects, and how to add new ones, see [Unit Systems](unit_systems.html)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The rest of the methods do in-place conversions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
    "outputs": [],
    "source": [
     "dens = dd['density']\n",
@@ -182,9 +210,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['density'])\n",
@@ -206,9 +232,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['cell_mass'])\n",
@@ -234,9 +258,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "q1 = yt.YTArray(1.0,\"C\") # coulombs\n",
@@ -249,9 +271,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "B1 = yt.YTArray(1.0,\"T\") # tesla\n",
@@ -285,9 +305,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import numpy as np\n",
@@ -317,9 +335,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['cell_mass'].ndarray_view())\n",
@@ -338,9 +354,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "density_values = dd['density'].d\n",
@@ -374,23 +388,19 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from astropy import units as u\n",
     "\n",
     "x = 42.0 * u.meter\n",
-    "y = yt.YTQuantity.from_astropy(x) "
+    "y = yt.YTQuantity.from_astropy(x)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (x, type(x))\n",
@@ -400,9 +410,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "a = np.random.random(size=10) * u.km/u.s\n",
@@ -412,9 +420,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (a, type(a))\n",
@@ -431,9 +437,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "temp = dd[\"temperature\"]\n",
@@ -443,9 +447,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (temp, type(temp))\n",
@@ -462,9 +464,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from yt.utilities.physical_constants import kboltz\n",
@@ -474,9 +474,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (kboltz, type(kboltz))\n",
@@ -493,9 +491,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "k1 = kboltz.to_astropy()\n",
@@ -506,9 +502,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "c = yt.YTArray.from_astropy(a)\n",
@@ -526,9 +520,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from pint import UnitRegistry\n",
@@ -540,9 +532,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (v, type(v))\n",
@@ -552,9 +542,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ptemp = temp.to_pint()"
@@ -563,9 +551,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (temp, type(temp))\n",
@@ -582,7 +568,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -594,4 +580,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
--- a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
+++ b/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets.  Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional arrays instances produced by different unit systems.  Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n",
+    "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets.  Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional array instances produced by different unit systems.  Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n",
     "\n",
     "As an example, let's load up two enzo datasets from different redshifts in the same cosmology simulation."
    ]
@@ -12,9 +12,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# A high redshift output from z ~ 8\n",
@@ -29,9 +27,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# A low redshift output from z ~ 0\n",
@@ -51,9 +47,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (ds2.length_unit.in_cgs()/ds1.length_unit.in_cgs() == (1+ds1.current_redshift)/(1+ds2.current_redshift))"
@@ -69,9 +63,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (ds2.length_unit/ds1.length_unit)"
@@ -89,9 +81,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -120,7 +110,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -132,4 +122,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- /dev/null
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -0,0 +1,491 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "By default, the results of most calculations in yt are expressed in a \"centimeters-grams-seconds\" (CGS) set of units. This includes the values of derived fields and aliased fields.\n",
+    "\n",
+    "However, this system of units may not be the most natural for a given dataset or an entire class of datasets. For this reason, yt provides the ability to define new unit systems and use them in a way that is highly configurable by the end-user. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Unit Systems Available in yt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Several unit systems are already supplied for use within yt. They are:\n",
+    "\n",
+    "* `\"cgs\"`: Centimeters-grams-seconds unit system, with base of `(cm, g, s, K, radian)`. Uses the Gaussian normalization for electromagnetic units. \n",
+    "* `\"mks\"`: Meters-kilograms-seconds unit system, with base of `(m, kg, s, K, radian, A)`.\n",
+    "* `\"imperial\"`: Imperial unit system, with base of `(mile, lbm, s, R, radian)`.\n",
+    "* `\"galactic\"`: \"Galactic\" unit system, with base of `(kpc, Msun, Myr, K, radian)`.\n",
+    "* `\"solar\"`: \"Solar\" unit system, with base of `(AU, Mearth, yr, K, radian)`. \n",
+    "* `\"planck\"`: Planck natural units $(\\hbar = c = G = k_B = 1)$, with base of `(l_pl, m_pl, t_pl, T_pl, radian)`. \n",
+    "* `\"geometrized\"`: Geometrized natural units $(c = G = 1)$, with base of `(l_geom, m_geom, t_geom, K, radian)`. \n",
+    "\n",
+    "We can examine these unit systems by querying them from the `unit_system_registry`. For example, we can look at the default CGS system:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "import yt\n",
+    "yt.unit_system_registry[\"cgs\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can see that we have two sets of units that this system defines: \"base\" and \"other\" units. The \"base\" units are the set of units from which all other units in the system are composed of, such as centimeters, grams, and seconds. The \"other\" units are compound units which fields with specific dimensionalities are converted to, such as ergs, dynes, gauss, and electrostatic units (esu). \n",
+    "\n",
+    "We see a similar setup for the MKS system, except that in this case, there is a base unit of current, the Ampere:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"mks\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can also look at the imperial system:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"imperial\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "and the \"galactic\" system as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"galactic\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Converting `YTArrays` to the Different Unit Systems"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Choosing a Unit System When Loading a Dataset"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "When a dataset is `load`ed, a unit system may be specified. When this happens, all aliased and derived fields will be converted to the units of the given system. The default is `\"cgs\"`.\n",
+    "\n",
+    "For example, we can specify that the fields from a FLASH dataset can be expressed in MKS units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_flash = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100\", unit_system=\"mks\")\n",
+    "sp = ds_flash.sphere(\"c\", (100.,\"kpc\"))\n",
+    "print (sp[\"density\"]) # This is an alias for (\"flash\",\"dens\")\n",
+    "print (sp[\"pressure\"]) # This is an alias for (\"flash\",\"pres\")\n",
+    "print (sp[\"angular_momentum_x\"]) # This is a derived field\n",
+    "print (sp[\"kinetic_energy\"]) # This is also a derived field"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Aliased fields are converted to the requested unit system, but the on-disk fields that they correspond to remain in their original (code) units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (sp[\"flash\",\"dens\"]) # This is aliased to (\"gas\", \"density\")\n",
+    "print (sp[\"flash\",\"pres\"]) # This is aliased to (\"gas\", \"pressure\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can take an `Enzo` dataset and express it in `\"galactic\"` units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_enzo = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\", unit_system=\"galactic\")\n",
+    "sp = ds_enzo.sphere(\"c\", (20.,\"kpc\"))\n",
+    "print (sp[\"density\"])\n",
+    "print (sp[\"pressure\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can also express all of the fields associated with a dataset in that dataset's system of \"code\" units. Though the on-disk fields are already in these units, this means that we can express even derived fields in code units as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_chombo = yt.load(\"KelvinHelmholtz/data.0004.hdf5\", unit_system=\"code\")\n",
+    "dd = ds_chombo.all_data()\n",
+    "print (dd[\"density\"])\n",
+    "print (dd[\"kinetic_energy\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Defining Fields So That They Can Use the Different Unit Systems"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you define a new derived field for use in yt and wish to make the different unit systems available to it, you will need to specify this when calling `add_field`. Suppose I defined a new field called `\"momentum_x\"` and wanted it to have general units. I would have to set it up in this fashion, using the `unit_system` attribute of the dataset and querying it for the appropriate dimensions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "mom_units = ds_flash.unit_system[\"velocity\"]*ds_flash.unit_system[\"density\"]\n",
+    "def _momentum_x(field, data):\n",
+    "    return data[\"density\"]*data[\"velocity_x\"]\n",
+    "ds_flash.add_field((\"gas\",\"momentum_x\"), function=_momentum_x, units=mom_units)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, the field will automatically be expressed in whatever units the dataset was called with. In this case, it was MKS:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "slc = yt.SlicePlot(ds_flash, \"z\", [\"momentum_x\"], width=(300.,\"kpc\"))\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the momentum density has been plotted with the correct MKS units of $\\mathrm{kg/(m^2\\cdot{s})}$."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you don't create a derived field from a dataset but instead use `yt.add_field`, and still want to use the unit system of that dataset for the units, the only option at present is to set `units=\"auto\"` in the call to `yt.add_field` and the `dimensions` keyword to the correct dimensions for the field:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.units import clight\n",
+    "\n",
+    "def _rest_energy(field, data):\n",
+    "    return data[\"cell_mass\"]*clight*clight\n",
+    "yt.add_field((\"gas\",\"rest_energy\"), function=_rest_energy, units=\"auto\", dimensions=\"energy\")\n",
+    "\n",
+    "ds_flash2 = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\", unit_system=\"galactic\")\n",
+    "\n",
+    "sp = ds_flash2.sphere(\"c\", (100.,\"kpc\"))\n",
+    "sp[\"rest_energy\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Obtaining Physical Constants in a Specific Unit System"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Each unit system provides the ability to obtain any physical constant in yt's physical constants database in the base units of that system via the `constants` attribute of the unit system. For example, to obtain the value of Newton's universal constant of gravitation in different base units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "for name in [\"cgs\", \"mks\", \"imperial\", \"planck\", \"geometrized\"]:\n",
+    "    unit_system = yt.unit_system_registry[name]\n",
+    "    print (name, unit_system.constants.G)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Equivalently, one could import a physical constant from the main database and convert it using `in_base`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.utilities.physical_constants import G\n",
+    "print (G.in_base(\"mks\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Defining Your Own Unit System"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You are not limited to using the unit systems already defined by yt. A new unit system can be defined by creating a new `UnitSystem` instance. For example, to create a unit system where the default units are in millimeters, centigrams, and microseconds:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system = yt.UnitSystem(\"small\", \"mm\", \"cg\", \"us\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "where the required arguments are a `name` for the unit system, and the `length_unit`, `mass_unit`, and `time_unit` for the unit system, which serve as the \"base\" units to convert everything else to. Once a unit system instance is created, it is automatically added to the `unit_system_registry` so that it may be used throughout yt:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"small\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the base units for the dimensions of angle and temperature have been automatically set to radians and Kelvin, respectively. If desired, these can be specified using optional arguments when creating the `UnitSystem` object:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "wacky_unit_system = yt.UnitSystem(\"wacky\", \"mile\", \"kg\", \"day\", temperature_unit=\"R\", angle_unit=\"deg\")\n",
+    "wacky_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Though it will rarely be necessary, an MKS-style system of units where a unit of current can be specified as a base unit can also be created using the `current_mks` optional argument:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "mksish_unit_system = yt.UnitSystem(\"mksish\", \"dm\", \"ug\", \"ks\", current_mks_unit=\"mA\")\n",
+    "mksish_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Initializing a `UnitSystem` object only sets up the base units. In this case, all fields will be converted to combinations of these base units based on their dimensionality. However, you may want to specify that fields of a given dimensionality use a compound unit by default instead. For example, you might prefer that in the `\"small\"` unit system that pressures be represented in microdynes per millimeter squared. To do this, set these to be the units of the `\"pressure\"` dimension explicitly:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system[\"pressure\"] = \"udyne/mm**2\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can now look at the `small_unit_system` object and see that these units are now defined for pressure in the \"Other Units\" category:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can do the same for a few other dimensionalities:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system[\"magnetic_field_cgs\"] = \"mG\"\n",
+    "small_unit_system[\"specific_energy\"] = \"cerg/ug\"\n",
+    "small_unit_system[\"velocity\"] = \"cm/s\"\n",
+    "small_unit_system"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -34,6 +34,7 @@
    comparing_units_from_different_datasets
    units_and_plotting
    unit_equivalencies
+   unit_systems
 
 .. note::
 

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/analyzing/units/unit_systems.rst
--- /dev/null
+++ b/doc/source/analyzing/units/unit_systems.rst
@@ -0,0 +1,7 @@
+.. _unit_systems:
+
+Unit Systems
+============
+
+.. notebook:: 7)_Unit_Systems.ipynb
+   :skip_exceptions:

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -33,7 +33,10 @@
 In this example, the ``density`` field will return data with units of
 ``g/cm**3`` and the ``thermal_energy`` field will return data units of
 ``erg/g``, so the result will automatically have units of pressure,
-``erg/cm**3``.
+``erg/cm**3``. This assumes the unit system is set to the default, which is
+CGS: if a different unit system is selected, the result will be in the same
+dimensions of pressure but different units. See :ref:`unit_systems` for more
+information.
 
 Once we've defined our function, we need to notify yt that the field is
 available.  The :func:`add_field` function is the means of doing this; it has a
@@ -47,7 +50,7 @@
 
 .. code-block:: python
 
-   yt.add_field("pressure", function=_pressure, units="dyne/cm**2")
+   yt.add_field(("gas", "pressure"), function=_pressure, units="dyne/cm**2")
 
 We feed it the name of the field, the name of the function, and the
 units.  Note that the units parameter is a "raw" string, in the format that yt 
@@ -59,7 +62,7 @@
 as in the ``_pressure`` example above.
 
 Field definitions return array data with units. If the field function returns
-data in a dimensionally equivalent unit (e.g. a ``dyne`` versus a ``N``), the
+data in a dimensionally equivalent unit (e.g. a ``"dyne"`` versus a ``"N"``), the
 field data will be converted to the units specified in ``add_field`` before
 being returned in a data object selection. If the field function returns data
 with dimensions that are incompatible with units specified in ``add_field``,
@@ -67,7 +70,7 @@
 function returns data in the correct units. Often, this means applying units to
 a dimensionless float or array.
 
-If your field definition influcdes physical constants rather than defining a
+If your field definition includes physical constants rather than defining a
 constant as a float, you can import it from ``yt.utilities.physical_constants``
 to get a predefined version of the constant with the correct units. If you know
 the units your data is supposed to have ahead of time, you can import unit
@@ -82,7 +85,29 @@
 Lastly, if you do not know the units of your field ahead of time, you can
 specify ``units='auto'`` in the call to ``add_field`` for your field.  This will
 automatically determine the appropriate units based on the units of the data
-returned by the field function.
+returned by the field function. This is also a good way to let your derived fields
+be automatically converted to the units of the :ref:`unit system <unit_systems>` in 
+your dataset. 
+
+If ``units='auto'`` is set, it is also required to set the ``dimensions`` keyword
+argument so that error-checking can be done on the derived field to make sure that
+the dimensionality of the returned array and the field are the same:
+
+.. code-block:: python
+
+    import yt
+    from yt.units import dimensions
+    
+    def _pressure(field, data):
+        return (data.ds.gamma - 1.0) * \
+              data["density"] * data["thermal_energy"]
+              
+    yt.add_field(("gas","pressure"), function=_pressure, units="auto",
+                 dimensions=dimensions.pressure)
+
+If ``dimensions`` is not set, an error will be thrown. The ``dimensions`` keyword
+can be a SymPy ``symbol`` object imported from ``yt.units.dimensions``, a compound
+dimension of these, or a string corresponding to one of these objects. 
 
 :func:`add_field` can be invoked in two other ways. The first is by the 
 function decorator :func:`derived_field`. The following code is equivalent to 
@@ -111,10 +136,27 @@
 .. code-block:: python
 
    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   ds.add_field("pressure", function=_pressure, units="dyne/cm**2")
+   ds.add_field(("gas", "pressure"), function=_pressure, units="dyne/cm**2")
 
-If you find yourself using the same custom-defined fields over and over, you
-should put them in your plugins file as described in :ref:`plugin-file`.
+If you specify fields in this way, you can take advantage of the dataset's 
+:ref:`unit system <unit_systems>` to define the units for you, so that
+the units will be returned in the units of that system:
+
+.. code-block:: python
+
+    ds.add_field(("gas", "pressure"), function=_pressure, units=ds.unit_system["pressure"])
+
+Since the :class:`yt.units.unit_systems.UnitSystem` object returns a :class:`yt.units.unit_object.Unit` object when
+queried, you're not limited to specifying units in terms of those already available. You can specify units for fields
+using basic arithmetic if necessary:
+
+.. code-block:: python
+
+    ds.add_field(("gas", "my_acceleration"), function=_my_acceleration,
+                 units=ds.unit_system["length"]/ds.unit_system["time"]**2)
+
+If you find yourself using the same custom-defined fields over and over, you should put them in your plugins file as
+described in :ref:`plugin-file`.
 
 A More Complicated Example
 --------------------------
@@ -148,7 +190,7 @@
        y_hat /= r
        z_hat /= r
        return xv*x_hat + yv*y_hat + zv*z_hat
-   yt.add_field("my_radial_velocity",
+   yt.add_field(("gas","my_radial_velocity"),
                 function=_my_radial_velocity,
                 units="cm/s",
                 take_log=False,
@@ -195,8 +237,11 @@
 ``function``
      This is a function handle that defines the field
 ``units``
-     This is a string that describes the units. Powers must be in
-     Python syntax (``**`` instead of ``^``).
+     This is a string that describes the units, or a query to a :ref:`UnitSystem <unit_systems>` 
+     object, e.g. ``ds.unit_system["energy"]``. Powers must be in Python syntax (``**`` 
+     instead of ``^``). Alternatively, it may be set to ``"auto"`` to have the units 
+     determined automatically. In this case, the ``dimensions`` keyword must be set to the
+     correct dimensions of the field. 
 ``display_name``
      This is a name used in the plots, for instance ``"Divergence of
      Velocity"``.  If not supplied, the ``name`` value is used.
@@ -219,6 +264,9 @@
 ``force_override``
      (*Advanced*) Overrides the definition of an old field if a field with the
      same name has already been defined.
+``dimensions``
+     Set this if ``units="auto"``. Can be either a string or a dimension object from
+     ``yt.units.dimensions``.
 
 Debugging a Derived Field
 -------------------------
@@ -236,7 +284,7 @@
 
 .. code-block:: python
 
-   @yt.derived_field(name = "funthings")
+   @yt.derived_field(name = ("gas","funthings"))
    def funthings(field, data):
        return data["sillythings"] + data["humorousthings"]**2.0
 
@@ -244,7 +292,7 @@
 
 .. code-block:: python
 
-   @yt.derived_field(name = "funthings")
+   @yt.derived_field(name = ("gas","funthings"))
    def funthings(field, data):
        data._debug()
        return data["sillythings"] + data["humorousthings"]**2.0

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -104,6 +104,43 @@
 have a display name of ``r"\rho"``.  Omitting the ``"display_name"``
 will result in using a capitalized version of the ``"name"``.
 
+.. _bfields-frontend:
+
+Creating Aliases for Magnetic Fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Setting up access to the magnetic fields in your dataset requires special
+handling, because in different unit systems magnetic fields have different
+dimensions (see :ref:`bfields` for an explanation). If your dataset includes 
+magnetic fields, you should include them in ``known_other_fields``, but do
+not set up aliases for them--instead use the special handling function 
+:meth:`~yt.fields.magnetic_field.setup_magnetic_field_aliases`. It takes
+as arguments the ``FieldInfoContainer`` instance, the field type of the 
+frontend, and the list of magnetic fields from the frontend. Here is an
+example of how this is implemented in the FLASH frontend:
+
+.. code-block:: python
+
+    class FLASHFieldInfo(FieldInfoContainer):
+        known_other_fields = (
+            ...
+            ("magx", (b_units, [], "B_x")), # Note there is no alias here
+            ("magy", (b_units, [], "B_y")),
+            ("magz", (b_units, [], "B_z")),
+            ...
+        )
+
+        def setup_fluid_fields(self):
+            from yt.fields.magnetic_field import \
+                setup_magnetic_field_aliases
+            ...
+            setup_magnetic_field_aliases(self, "flash", ["mag%s" % ax for ax in "xyz"])    
+
+This function should always be imported and called from within the 
+``setup_fluid_fields`` method of the ``FieldInfoContainer``. If this 
+function is used, converting between magnetic fields in different 
+:ref:`unit systems <unit_systems>` will be handled automatically. 
+
 Data Localization Structures
 ----------------------------
 

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -41,9 +41,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -60,9 +58,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "arr = np.random.random(size=(64,64,64))"
@@ -78,9 +74,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = dict(density = (arr, \"g/cm**3\"))\n",
@@ -124,9 +118,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -148,9 +140,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
@@ -177,9 +167,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -205,9 +193,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import h5py\n",
@@ -227,9 +213,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (f.keys())"
@@ -245,9 +229,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "units = [\"gauss\",\"gauss\",\"gauss\", \"g/cm**3\", \"erg/cm**3\", \"K\", \n",
@@ -264,9 +246,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = {k:(v.value,u) for (k,v), u in zip(f.items(),units)}\n",
@@ -276,9 +256,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"Density\"][0].shape, length_unit=250.*cm_per_kpc, bbox=bbox, nprocs=8, \n",
@@ -295,9 +273,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n",
@@ -323,9 +299,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "#Find the min and max of the field\n",
@@ -345,9 +319,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)"
@@ -363,9 +335,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Choose a vector representing the viewing direction.\n",
@@ -375,7 +345,7 @@
     "# Define the width of the image\n",
     "W = 1.5*ds.domain_width[0]\n",
     "# Define the number of pixels to render\n",
-    "Npixels = 512 "
+    "Npixels = 512"
    ]
   },
   {
@@ -388,9 +358,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "cam = ds.camera(c, L, W, Npixels, tf, fields=['Temperature'],\n",
@@ -404,9 +372,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "cam.show()"
@@ -429,9 +395,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import astropy.io.fits as pyfits\n",
@@ -448,9 +412,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
@@ -467,9 +429,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = {}\n",
@@ -489,9 +449,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data[\"velocity_x\"] = data.pop(\"x-velocity\")\n",
@@ -509,9 +467,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n",
@@ -539,9 +495,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "grid_data = [\n",
@@ -566,9 +520,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "for g in grid_data: \n",
@@ -586,9 +538,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
@@ -611,9 +561,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_amr_grids(grid_data, [32, 32, 32])"
@@ -629,9 +577,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -650,7 +596,6 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "* Units will be incorrect unless the data has already been converted to cgs.\n",
     "* Particles may be difficult to integrate.\n",
     "* Data must already reside in memory before loading it in to yt, whether it is generated at runtime or loaded from disk. \n",
     "* Some functions may behave oddly, and parallelism will be disappointing or non-existent in most cases.\n",
@@ -668,7 +613,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -680,4 +625,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -803,7 +803,8 @@
 
 .. rubric:: Caveats
 
-* Please be careful that the units are correctly utilized; yt assumes cgs.
+* Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to
+  other :ref:`unit systems <unit_systems>` is also possible. 
 
 .. _loading-gadget-data:
 
@@ -1065,7 +1066,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Some functions may behave oddly, and parallelism will be disappointing or
   non-existent in most cases.
 * No consistency checks are performed on the index
@@ -1123,7 +1123,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Particles may be difficult to integrate.
 * Data must already reside in memory.
 
@@ -1176,7 +1175,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Integration is not implemented.
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
@@ -1230,7 +1228,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Integration is not implemented.
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 doc/source/reference/field_list.rst
--- a/doc/source/reference/field_list.rst
+++ b/doc/source/reference/field_list.rst
@@ -3084,10 +3084,10 @@
       def _vorticity_x(field, data):
           f  = (data[ftype, "velocity_z"][sl_center,sl_right,sl_center] -
                 data[ftype, "velocity_z"][sl_center,sl_left,sl_center]) \
-                / (div_fac*just_one(data["index", "dy"]).in_cgs())
+                / (div_fac*just_one(data["index", "dy"]))
           f -= (data[ftype, "velocity_y"][sl_center,sl_center,sl_right] -
                 data[ftype, "velocity_y"][sl_center,sl_center,sl_left]) \
-                / (div_fac*just_one(data["index", "dz"].in_cgs()))
+                / (div_fac*just_one(data["index", "dz"]))
           new_field = data.ds.arr(np.zeros_like(data[ftype, "velocity_z"],
                                                 dtype=np.float64),
                                   f.units)
@@ -3220,7 +3220,7 @@
       def _cylindrical_r(field, data):
           normal = data.get_field_parameter("normal")
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
+          return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_base(unit_system.name)
   
 
 ('index', 'cylindrical_theta')
@@ -3251,7 +3251,7 @@
       def _cylindrical_z(field, data):
           normal = data.get_field_parameter("normal")
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
+          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_base(unit_system.name)
   
 
 ('index', 'disk_angle')
@@ -3424,7 +3424,7 @@
 
       def _spherical_r(field, data):
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
+          return data.ds.arr(get_sph_r(coords), "code_length").in_base(unit_system.name)
   
 
 ('index', 'spherical_theta')

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -176,5 +176,8 @@
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position
 
+from yt.units.unit_systems import UnitSystem
+from yt.units.unit_object import unit_system_registry
+
 from yt.analysis_modules.list_modules import \
     amods

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -133,7 +133,7 @@
             if num_cells == 0:
                 continue
             vol = chunk["cell_volume"].in_cgs().v
-            EM = (chunk["density"]/mp).v**2
+            EM = (chunk["density"]/mp).in_cgs().v**2
             EM *= 0.5*(1.+self.X_H)*self.X_H*vol
 
             if isinstance(self.Zmet, string_types):

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -426,7 +426,7 @@
             # will not be necessary at all, as the final conversion will occur
             # at the display layer.
             if not dl.units.is_dimensionless:
-                dl.convert_to_units("cm")
+                dl.convert_to_units(self.ds.unit_system["length"])
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
             d = chunk[field] * dl

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,7 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
+import yt.units.dimensions as ytdims
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
     YTFieldUnitError, \
@@ -45,7 +46,8 @@
     YTFieldNotParseable, \
     YTFieldNotFound, \
     YTFieldTypeNotFound, \
-    YTDataSelectorNotImplemented
+    YTDataSelectorNotImplemented, \
+    YTDimensionalityError
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -186,11 +188,11 @@
             self.center = None
             return
         elif isinstance(center, YTArray):
-            self.center = self.ds.arr(center.in_cgs())
+            self.center = self.ds.arr(center.copy())
             self.center.convert_to_units('code_length')
         elif isinstance(center, (list, tuple, np.ndarray)):
             if isinstance(center[0], YTQuantity):
-                self.center = self.ds.arr([c.in_cgs() for c in center])
+                self.center = self.ds.arr([c.copy() for c in center])
                 self.center.convert_to_units('code_length')
             else:
                 self.center = self.ds.arr(center, 'code_length')
@@ -935,7 +937,7 @@
         s = "%s (%s): " % (self.__class__.__name__, self.ds)
         for i in self._con_args:
             try:
-                s += ", %s=%s" % (i, getattr(self, i).in_cgs())
+                s += ", %s=%s" % (i, getattr(self, i).in_base(unit_system=self.ds.unit_system))
             except AttributeError:
                 s += ", %s=%s" % (i, getattr(self, i))
         return s
@@ -1207,13 +1209,19 @@
                         # infer the units from the units of the data we get back
                         # from the field function and use these units for future
                         # field accesses
-                        units = str(getattr(fd, 'units', ''))
+                        units = getattr(fd, 'units', '')
+                        if units == '':
+                            dimensions = ytdims.dimensionless
+                        else:
+                            dimensions = units.dimensions
+                            units = str(units.get_base_equivalent(self.ds.unit_system.name))
+                        if fi.dimensions != dimensions:
+                            raise YTDimensionalityError(fi.dimensions, dimensions)
                         fi.units = units
                         self.field_data[field] = self.ds.arr(fd, units)
                         msg = ("Field %s was added without specifying units, "
                                "assuming units are %s")
                         mylog.warn(msg % (fi.name, units))
-                        continue
                     try:
                         fd.convert_to_units(fi.units)
                     except AttributeError:

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -40,7 +40,7 @@
     ParameterFileStore, \
     NoParameterShelf, \
     output_type_registry
-from yt.units.unit_object import Unit
+from yt.units.unit_object import Unit, unit_system_registry
 from yt.units.unit_registry import UnitRegistry
 from yt.fields.derived_field import \
     ValidateSpatial
@@ -59,6 +59,7 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
+from yt.units.unit_systems import create_code_unit_system
 from yt.data_objects.region_expression import \
     RegionExpression
 
@@ -192,7 +193,8 @@
             obj = _cached_datasets[apath]
         return obj
 
-    def __init__(self, filename, dataset_type=None, file_style=None, units_override=None):
+    def __init__(self, filename, dataset_type=None, file_style=None, 
+                 units_override=None, unit_system="cgs"):
         """
         Base class for generating new output types.  Principally consists of
         a *filename* and a *dataset_type* which will be passed on to children.
@@ -235,6 +237,11 @@
         self.set_units()
         self._setup_coordinate_handler()
 
+        create_code_unit_system(self)
+        if unit_system == "code":
+            unit_system = str(self)
+        self.unit_system = unit_system_registry[unit_system]
+
         # Because we need an instantiated class to check the ds's existence in
         # the cache, we move that check to here from __new__.  This avoids
         # double-instantiation.
@@ -870,7 +877,7 @@
         """Converts an array into a :class:`yt.units.yt_array.YTArray`
 
         The returned YTArray will be dimensionless by default, but can be
-        cast to arbitray units using the ``input_units`` keyword argument.
+        cast to arbitrary units using the ``input_units`` keyword argument.
 
         Parameters
         ----------
@@ -916,7 +923,7 @@
         """Converts an scalar into a :class:`yt.units.yt_array.YTQuantity`
 
         The returned YTQuantity will be dimensionless by default, but can be
-        cast to arbitray units using the ``input_units`` keyword argument.
+        cast to arbitrary units using the ``input_units`` keyword argument.
 
         Parameters
         ----------

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -553,7 +553,7 @@
                 values = self.arr(*values)
             else:
                 values = self.arr(values)
-        values = values.in_cgs()
+        values = values.in_base()
 
         if outputs is None:
             outputs = self.all_outputs

diff -r 7a41688d7c0f365f1f46bba48f41499de04f75c1 -r 109872b62c3878385a5e7a79eb2c18e190284dd4 yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -37,6 +37,7 @@
 
 @register_field_plugin
 def setup_angular_momentum(registry, ftype = "gas", slice_info = None):
+    unit_system = registry.ds.unit_system
     def _specific_angular_momentum_x(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         rv = obtain_rvec(data)
@@ -60,26 +61,26 @@
 
     registry.add_field((ftype, "specific_angular_momentum_x"),
                         function=_specific_angular_momentum_x,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
     registry.add_field((ftype, "specific_angular_momentum_y"),
                         function=_specific_angular_momentum_y,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
     registry.add_field((ftype, "specific_angular_momentum_z"),
                         function=_specific_angular_momentum_z,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
 
     create_magnitude_field(registry, "specific_angular_momentum",
-                           "cm**2 / s", ftype=ftype)
+                           unit_system["specific_angular_momentum"], ftype=ftype)
 
     def _angular_momentum_x(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_x"]
     registry.add_field((ftype, "angular_momentum_x"),
                        function=_angular_momentum_x,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     def _angular_momentum_y(field, data):
@@ -87,7 +88,7 @@
              * data[ftype, "specific_angular_momentum_y"]
     registry.add_field((ftype, "angular_momentum_y"),
                        function=_angular_momentum_y,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     def _angular_momentum_z(field, data):
@@ -95,8 +96,8 @@
              * data[ftype, "specific_angular_momentum_z"]
     registry.add_field((ftype, "angular_momentum_z"),
                        function=_angular_momentum_z,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     create_magnitude_field(registry, "angular_momentum",
-                           "g * cm**2 / s", ftype=ftype)
+                           unit_system["angular_momentum"], ftype=ftype)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/203ee09e51ee/
Changeset:   203ee09e51ee
Branch:      yt
User:        jzuhone
Date:        2016-03-29 15:57:18+00:00
Summary:     Merge
Affected #:  105 files

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -795,8 +795,8 @@
    rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
    rather than ``SpecialGrid.__init__()``.
  * Docstrings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
-   fiducial example of a docstring.
+   that occur on an object.  See :ref:`docstrings` below for a fiducial example
+   of a docstring.
  * Use only one top-level import per line. Unless there is a good reason not to,
    imports should happen at the top of the file, after the copyright blurb.
  * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
@@ -843,7 +843,7 @@
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 
-.. _docstrings
+.. _docstrings:
 
 Docstrings
 ----------

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -204,7 +204,7 @@
 --------------------------
 
 After loading a spectrum and specifying the properties of the species
-used to generate the spectrum, an apporpriate fit can be generated. 
+used to generate the spectrum, an appropriate fit can be generated. 
 
 .. code-block:: python
 
@@ -232,7 +232,7 @@
 as all lines with the same group number as ``group#[i]``.
 
 The ``fitted_flux`` is an ndarray of the same size as ``flux`` and 
-``wavelength`` that contains the cummulative absorption spectrum generated 
+``wavelength`` that contains the cumulative absorption spectrum generated
 by the lines contained in ``fitted_lines``.
 
 Saving a Spectrum Fit

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -7,7 +7,7 @@
 disconnected structures within a dataset.  This works by first creating a 
 single contour over the full range of the contouring field, then continually 
 increasing the lower value of the contour until it reaches the maximum value 
-of the field.  As disconnected structures are identified as separate contoures, 
+of the field.  As disconnected structures are identified as separate contours, 
 the routine continues recursively through each object, creating a hierarchy of 
 clumps.  Individual clumps can be kept or removed from the hierarchy based on 
 the result of user-specified functions, such as checking for gravitational 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -93,7 +93,7 @@
 ellipsoid's semi-principle axes. "e0" is the largest semi-principle
 axis vector direction that would have magnitude A but normalized.  
 The "tilt" is an angle measured in radians.  It can be best described
-as after the rotation about the z-axis to allign e0 to x in the x-y
+as after the rotation about the z-axis to align e0 to x in the x-y
 plane, and then rotating about the y-axis to align e0 completely to
 the x-axis, the angle remaining to rotate about the x-axis to align
 both e1 to the y-axis and e2 to the z-axis.

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -65,12 +65,13 @@
 
 Analysis is done by adding actions to the 
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
-Each action is represented by a callback function that will be run on each halo. 
-There are three types of actions:
+Each action is represented by a callback function that will be run on
+each halo.  There are four types of actions:
 
 * Filters
 * Quantities
 * Callbacks
+* Recipes
 
 A list of all available filters, quantities, and callbacks can be found in 
 :ref:`halo_analysis_ref`.  
@@ -213,6 +214,50 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
 
+Recipes
+^^^^^^^
+
+Recipes allow you to create analysis tasks that consist of a series of
+callbacks, quantities, and filters that are run in succession.  An example
+of this is
+:func:`~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities`,
+which calculates virial quantities by first creating a sphere container,
+performing 1D radial profiles, and then interpolating to get values at a
+specified threshold overdensity.  All of these operations are separate
+callbacks, but the recipes allow you to add them to your analysis pipeline
+with one call.  For example,
+
+.. code-block:: python
+
+   hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"])
+
+The available recipes are located in
+``yt/analysis_modules/halo_analysis/halo_recipes.py``.  New recipes can be
+created in the following manner:
+
+.. code-block:: python
+
+   def my_recipe(halo_catalog, fields, weight_field=None):
+       # create a sphere
+       halo_catalog.add_callback("sphere")
+       # make profiles
+       halo_catalog.add_callback("profile", ["radius"], fields,
+                                 weight_field=weight_field)
+       # save the profile data
+       halo_catalog.add_callback("save_profiles", output_dir="profiles")
+
+   # add recipe to the registry of recipes
+   add_recipe("profile_and_save", my_recipe)
+
+
+   # ...  Later on in your script
+   hc.add_recipe("profile_and_save", ["density", "temperature"],
+                 weight_field="cell_mass")
+
+Note, that unlike callback, filter, and quantity functions that take a ``Halo``
+object as the first argument, recipe functions should take a ``HaloCatalog``
+object as the first argument.
+
 Running Analysis
 ----------------
 
@@ -236,7 +281,7 @@
 All callbacks, quantities, and filters are stored in an actions list, 
 meaning that they are executed in the same order in which they were added. 
 This enables the use of simple, reusable, single action callbacks that 
-depend on each other. This also prevents unecessary computation by allowing 
+depend on each other. This also prevents unnecessary computation by allowing 
 the user to add filters at multiple stages to skip remaining analysis if it 
 is not warranted.
 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -13,7 +13,7 @@
 
 A halo mass function can be created for the halos identified in a cosmological 
 simulation, as well as analytic fits using any arbitrary set of cosmological
-paramters. In order to create a mass function for simulated halos, they must
+parameters. In order to create a mass function for simulated halos, they must
 first be identified (using HOP, FOF, or Rockstar, see 
 :ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
 halo masses will then be found, and can be compared to the analytic prediction
@@ -78,7 +78,7 @@
   my_halos = load("rockstar_halos/halos_0.0.bin")
   hmf = HaloMassFcn(halos_ds=my_halos)
 
-A simulation dataset can be passed along with additonal cosmological parameters 
+A simulation dataset can be passed along with additional cosmological parameters 
 to create an analytic mass function.
 
 .. code-block:: python
@@ -106,7 +106,7 @@
 -----------------
 
 * **simulation_ds** (*Simulation dataset object*)
-  The loaded simulation dataset, used to set cosmological paramters.
+  The loaded simulation dataset, used to set cosmological parameters.
   Default : None.
 
 * **halos_ds** (*Halo dataset object*)
@@ -130,7 +130,7 @@
 
 * **omega_baryon0**  (*float*)
   The fraction of the universe made up of baryonic matter. This is not 
-  always stored in the datset and should be checked by hand.
+  always stored in the dataset and should be checked by hand.
   Default : 0.0456.
 
 * **hubble0** (*float*)
@@ -140,14 +140,14 @@
 * **sigma8** (*float*)
   The amplitude of the linear power spectrum at z=0 as specified by 
   the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
-  8 Mpc/h. This is not always stored in the datset and should be 
+  8 Mpc/h. This is not always stored in the dataset and should be 
   checked by hand.
   Default : 0.86.
 
 * **primoridal_index** (*float*)
   This is the index of the mass power spectrum before modification by 
   the transfer function. A value of 1 corresponds to the scale-free 
-  primordial spectrum. This is not always stored in the datset and 
+  primordial spectrum. This is not always stored in the dataset and 
   should be checked by hand.
   Default : 1.0.
 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -40,7 +40,7 @@
 the full halo catalog documentation for further information about
 how to add these quantities and what quantities are available.
 
-You no longer have to iteratre over halos in the ``halo_list``.
+You no longer have to iterate over halos in the ``halo_list``.
 Now a halo dataset can be treated as a regular dataset and 
 all quantities are available by accessing ``all_data``.
 Specifically, all quantities can be accessed as shown:

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -50,7 +50,7 @@
   ``use_minimum_datasets`` set to False, this parameter specifies the 
   fraction of the total box size to be traversed before rerandomizing the 
   projection axis and center.  This was invented to allow light cones with 
-  thin slices to sample coherent large cale structure, but in practice does 
+  thin slices to sample coherent large scale structure, but in practice does 
   not work so well.  Try setting this parameter to 1 and see what happens.  
   Default: 0.0.
 
@@ -74,7 +74,7 @@
 
 A light cone solution consists of a list of datasets spanning a redshift 
 interval with a random orientation for each dataset.  A new solution 
-is calcuated with the 
+is calculated with the 
 :func:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone.calculate_light_cone_solution`
 function:
 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -347,7 +347,7 @@
   be used to control what vector corresponds to the "up" direction in 
   the resulting event list. 
 * ``psf_sigma`` may be specified to provide a crude representation of 
-  a PSF, and corresponds to the standard deviation (in degress) of a 
+  a PSF, and corresponds to the standard deviation (in degrees) of a 
   Gaussian PSF model. 
 
 Let's just take a quick look at the raw events object:

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -246,6 +246,8 @@
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
 
+.. _region-reference:
+
 3D Objects
 """"""""""
 
@@ -256,8 +258,6 @@
       creating a Region covering the entire dataset domain.  It is effectively 
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
-.. _region-reference:
-
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
@@ -313,7 +313,7 @@
     | A ``cut_region`` is a filter which can be applied to any other data 
       object.  The filter is defined by the conditionals present, which 
       apply cuts to the data in the object.  A ``cut_region`` will work
-      for either particle fields or mesh fields, but not on both simulaneously.
+      for either particle fields or mesh fields, but not on both simultaneously.
       For more detailed information and examples, see :ref:`cut-regions`.
 
 **Collection of Data Objects** 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -49,7 +49,7 @@
 
     $ conda install mpi4py
 
-This will install `MPICH2 <https://www.mpich.org/>`_ and will interefere with
+This will install `MPICH2 <https://www.mpich.org/>`_ and will interfere with
 other MPI libraries that are already installed. Therefore, it is preferable to
 use the ``pip`` installation method.
 
@@ -103,7 +103,7 @@
    p.save()
 
 If this script is run in parallel, two of the most expensive operations -
-finding of the maximum density and the projection will be calulcated in
+finding of the maximum density and the projection will be calculated in
 parallel.  If we save the script as ``my_script.py``, we would run it on 16 MPI
 processes using the following Bash command:
 
@@ -121,7 +121,7 @@
 
 You can set the ``communicator`` keyword in the 
 :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.enable_parallelism` 
-call to a specific MPI communicator to specify a subset of availble MPI 
+call to a specific MPI communicator to specify a subset of available MPI 
 processes.  If none is specified, it defaults to ``COMM_WORLD``.
 
 Creating Parallel and Serial Sections in a Script
@@ -251,7 +251,7 @@
 You may define an empty dictionary and include it as the keyword argument 
 ``storage`` to ``piter()``.  Then, during the processing step, you can access
 this dictionary as the ``sto`` object.  After the 
-loop is finished, the dictionary is re-aggragated from all of the processors, 
+loop is finished, the dictionary is re-aggregated from all of the processors, 
 and you can access the contents:
 
 .. code-block:: python

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/time_series_analysis.rst
--- a/doc/source/analyzing/time_series_analysis.rst
+++ b/doc/source/analyzing/time_series_analysis.rst
@@ -79,7 +79,7 @@
 Analyzing an Entire Simulation
 ------------------------------
 
-.. note:: Implemented for: Enzo, Gadget, OWLS.
+.. note:: Implemented for the Enzo, Gadget, OWLS, and Exodus II frontends.
 
 The parameter file used to run a simulation contains all the information 
 necessary to know what datasets should be available.  The ``simulation`` 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/analyzing/units/comoving_units_and_code_units.rst
--- a/doc/source/analyzing/units/comoving_units_and_code_units.rst
+++ b/doc/source/analyzing/units/comoving_units_and_code_units.rst
@@ -12,7 +12,7 @@
 
 yt has additional capabilities to handle the comoving coordinate system used
 internally in cosmological simulations. Simulations that use comoving
-coordinates, all length units have three other counterparts correspoding to
+coordinates, all length units have three other counterparts corresponding to
 comoving units, scaled comoving units, and scaled proper units. In all cases
 'scaled' units refer to scaling by the reduced Hubble parameter - i.e. the length
 unit is what it would be in a universe where Hubble's parameter is 100 km/s/Mpc.

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -60,7 +60,7 @@
 
 # General information about the project.
 project = u'The yt Project'
-copyright = u'2013, the yt Project'
+copyright = u'2013-2016, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,31 +1,30 @@
 import yt
 import numpy as np
 
-# Follow the simple_volume_rendering cookbook for the first part of this.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
+ds = yt.load("MOOSE_sample_data/out.e-s010")
 sc = yt.create_scene(ds)
 cam = sc.camera
-cam.resolution = (512, 512)
-cam.set_width(ds.domain_width/20.0)
 
-# Find the maximum density location, store it in max_c
-v, max_c = ds.find_max('density')
+# save an image at the starting position
+frame = 0
+sc.save('camera_movement_%04i.png' % frame)
+frame += 1
 
-frame = 0
-# Move to the maximum density location over 5 frames
-for _ in cam.iter_move(max_c, 5):
+# Zoom out by a factor of 2 over 5 frames
+for _ in cam.iter_zoom(0.5, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Zoom in by a factor of 10 over 5 frames
-for _ in cam.iter_zoom(10.0, 5):
+# Move to the position [-10.0, 10.0, -10.0] over 5 frames
+pos = ds.arr([-10.0, 10.0, -10.0], 'code_length')
+for _ in cam.iter_move(pos, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Do a rotation over 5 frames
+# Rotate by 180 degrees over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -195,7 +195,11 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 In this recipe, we move a camera through a domain and take multiple volume
-rendering snapshots.
+rendering snapshots. This recipe uses an unstructured mesh dataset (see
+:ref:`unstructured_mesh_rendering`), which makes it easier to visualize what 
+the Camera is doing, but you can manipulate the Camera for other dataset types 
+in exactly the same manner.
+
 See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,7 +65,7 @@
 
 .. yt_cookbook:: light_ray.py 
 
-This script demontrates how to make a light ray from a single dataset.
+This script demonstrates how to make a light ray from a single dataset.
 
 .. _cookbook-single-dataset-light-ray:
 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/cookbook/custom_colorbar_tickmarks.ipynb
--- a/doc/source/cookbook/custom_colorbar_tickmarks.ipynb
+++ b/doc/source/cookbook/custom_colorbar_tickmarks.ipynb
@@ -64,6 +64,24 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "Next, we call `_setup_plots()` to ensure the plot is properly initialized. Without this, the custom tickmarks we are adding will be ignored."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "slc._setup_plots()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "To set custom tickmarks, simply call the `matplotlib` [`set_ticks`](http://matplotlib.org/api/colorbar_api.html#matplotlib.colorbar.ColorbarBase.set_ticks) and [`set_ticklabels`](http://matplotlib.org/api/colorbar_api.html#matplotlib.colorbar.ColorbarBase.set_ticklabels) functions."
    ]
   },

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/cookbook/halo_profiler.py
--- a/doc/source/cookbook/halo_profiler.py
+++ b/doc/source/cookbook/halo_profiler.py
@@ -12,26 +12,16 @@
 # Filter out less massive halos
 hc.add_filter("quantity_value", "particle_mass", ">", 1e14, "Msun")
 
-# attach a sphere object to each halo whose radius extends
-#   to twice the radius of the halo
-hc.add_callback("sphere", factor=2.0)
+# This recipe creates a spherical data container, computes
+# radial profiles, and calculates r_200 and M_200.
+hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"])
 
-# use the sphere to calculate radial profiles of gas density
-# weighted by cell volume in terms of the virial radius
-hc.add_callback("profile", ["radius"],
-                [("gas", "overdensity")],
-                weight_field="cell_volume",
-                accumulation=True,
-                storage="virial_quantities_profiles")
-
-
-hc.add_callback("virial_quantities", ["radius"],
-                profile_storage="virial_quantities_profiles")
-hc.add_callback('delete_attribute', 'virial_quantities_profiles')
-
+# Create a sphere container with radius 5x r_200.
 field_params = dict(virial_radius=('quantity', 'radius_200'))
 hc.add_callback('sphere', radius_field='radius_200', factor=5,
                 field_parameters=field_params)
+
+# Compute profiles of T vs. r/r_200
 hc.add_callback('profile', ['virial_radius_fraction'], 
                 [('gas', 'temperature')],
                 storage='virial_profiles',

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/cookbook/notebook_tutorial.rst
--- a/doc/source/cookbook/notebook_tutorial.rst
+++ b/doc/source/cookbook/notebook_tutorial.rst
@@ -17,7 +17,7 @@
    $ ipython notebook
 
 Depending on your default web browser and system setup this will open a web
-browser and direct you to the notebook dahboard.  If it does not,  you might
+browser and direct you to the notebook dashboard.  If it does not,  you might
 need to connect to the notebook manually.  See the `IPython documentation
 <http://ipython.org/ipython-doc/stable/notebook/notebook.html#starting-the-notebook-server>`_
 for more details.

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -1,5 +1,5 @@
 import yt
-from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
+from yt.visualization.volume_rendering.api import Scene, VolumeSource
 import numpy as np
 
 field = ("gas", "density")
@@ -19,7 +19,7 @@
 tf.grey_opacity = True
 
 # Plane-parallel lens
-cam = Camera(ds, lens_type='plane-parallel')
+cam = sc.add_camera(ds, lens_type='plane-parallel')
 # Set the resolution of the final projection.
 cam.resolution = [250, 250]
 # Set the location of the camera to be (x=0.2, y=0.5, z=0.5)
@@ -32,13 +32,12 @@
 # Set the width of the camera, where width[0] and width[1] specify the length and
 # height of final projection, while width[2] in plane-parallel lens is not used.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_plane-parallel.png', sigma_clip=6.0)
 
 # Perspective lens
-cam = Camera(ds, lens_type='perspective')
+cam = sc.add_camera(ds, lens_type='perspective')
 cam.resolution = [250, 250]
 # Standing at (x=0.2, y=0.5, z=0.5), we look at the area of x>0.2 (with some open angle
 # specified by camera width) along the positive x direction.
@@ -49,13 +48,12 @@
 # height of the final projection, while width[2] specifies the distance between the
 # camera and the final image.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_perspective.png', sigma_clip=6.0)
 
 # Stereo-perspective lens
-cam = Camera(ds, lens_type='stereo-perspective')
+cam = sc.add_camera(ds, lens_type='stereo-perspective')
 # Set the size ratio of the final projection to be 2:1, since stereo-perspective lens
 # will generate the final image with both left-eye and right-eye ones jointed together.
 cam.resolution = [500, 250]
@@ -65,14 +63,13 @@
 cam.set_width(ds.domain_width*0.5)
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-cam = Camera(dd, lens_type='fisheye')
+cam = sc.add_camera(dd, lens_type='fisheye')
 cam.resolution = [250, 250]
 v, c = ds.find_max(field)
 cam.set_position(c - 0.0005 * ds.domain_width)
@@ -80,13 +77,12 @@
                        north_vector=north_vector)
 cam.set_width(ds.domain_width)
 cam.lens.fov = 360.0
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_fisheye.png', sigma_clip=6.0)
 
 # Spherical lens
-cam = Camera(ds, lens_type='spherical')
+cam = sc.add_camera(ds, lens_type='spherical')
 # Set the size ratio of the final projection to be 2:1, since spherical lens
 # will generate the final image with length of 2*pi and height of pi.
 cam.resolution = [500, 250]
@@ -97,13 +93,12 @@
                        north_vector=north_vector)
 # In (stereo)spherical camera, camera width is not used since the entire volume
 # will be rendered
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_spherical.png', sigma_clip=6.0)
 
 # Stereo-spherical lens
-cam = Camera(ds, lens_type='stereo-spherical')
+cam = sc.add_camera(ds, lens_type='stereo-spherical')
 # Set the size ratio of the final projection to be 4:1, since spherical-perspective lens
 # will generate the final image with both left-eye and right-eye ones jointed together.
 cam.resolution = [1000, 250]
@@ -114,7 +109,6 @@
 # will be rendered
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-spherical.png', sigma_clip=6.0)

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -158,7 +158,7 @@
 HTML. to simplify versioning of the notebook JSON format, we store notebooks in
 an unevaluated state.
 
-To build the full documentation, you will need yt, jupyter, and all depedencies 
+To build the full documentation, you will need yt, jupyter, and all dependencies 
 needed for yt's analysis modules installed. The following dependencies were 
 used to generate the yt documentation during the release of yt 3.2 in 2015.
 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -65,7 +65,7 @@
 data in a dimensionally equivalent unit (e.g. a ``"dyne"`` versus a ``"N"``), the
 field data will be converted to the units specified in ``add_field`` before
 being returned in a data object selection. If the field function returns data
-with dimensions that are incompatibible with units specified in ``add_field``,
+with dimensions that are incompatible with units specified in ``add_field``,
 you will see an error. To clear this error, you must ensure that your field
 function returns data in the correct units. Often, this means applying units to
 a dimensionless float or array.
@@ -75,7 +75,7 @@
 to get a predefined version of the constant with the correct units. If you know
 the units your data is supposed to have ahead of time, you can import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
-return value of your field function by the appropriate compbination of unit
+return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into
 :class:`~yt.units.yt_array.YTArray` or :class:`~yt.units.yt_array.YTQuantity`
 instances by making use of the

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -19,7 +19,7 @@
 checking in any code that breaks existing functionality.  To further this goal,
 an automatic buildbot runs the test suite after each code commit to confirm
 that yt hasn't broken recently.  To supplement this effort, we also maintain a
-`continuous integration server <http://tests.yt-project.org>`_ that runs the
+`continuous integration server <https://tests.yt-project.org>`_ that runs the
 tests with each commit to the yt version control repository.
 
 .. _unit_testing:
@@ -471,8 +471,87 @@
 Another good example of an image comparison test is the
 ``PlotWindowAttributeTest`` defined in the answer testing framework and used in
 ``yt/visualization/tests/test_plotwindow.py``. This test shows how a new answer
-test subclass can be used to programitically test a variety of different methods
+test subclass can be used to programmatically test a variety of different methods
 of a complicated class using the same test class. This sort of image comparison
 test is more useful if you are finding yourself writing a ton of boilerplate
 code to get your image comparison test working.  The ``GenericImageTest`` is
 more useful if you only need to do a one-off image comparison test.
+
+Enabling Answer Tests on Jenkins
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Before any code is added to or modified in the yt codebase, each incoming
+changeset is run against all available unit and answer tests on our `continuous
+integration server <http://tests.yt-project.org>`_. While unit tests are
+autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
+answer tests require definition of which set of tests constitute to a given
+answer. Configuration for the integration server is stored in
+*tests/tests_2.7.yaml* in the main yt repository:
+
+.. code-block:: yaml
+
+   answer_tests:
+      local_artio_270:
+         - yt/frontends/artio/tests/test_outputs.py
+   # ...
+   other_tests:
+      unittests:
+         - '-v'
+         - '-s'
+
+Each element under *answer_tests* defines answer name (*local_artio_270* in above
+snippet) and specifies a list of files/classes/methods that will be validated
+(*yt/frontends/artio/tests/test_outputs.py* in above snippet). On the testing
+server it is translated to:
+
+.. code-block:: bash
+
+   $ nosetests --with-answer-testing --local --local-dir ... --answer-big-data \
+      --answer-name=local_artio_270 \
+      yt/frontends/artio/tests/test_outputs.py
+
+If the answer doesn't exist on the server yet, ``nosetests`` is run twice and
+during first pass ``--answer-store`` is added to the commandline. 
+
+Updating Answers
+~~~~~~~~~~~~~~~~
+
+In order to regenerate answers for a particular set of tests it is sufficient to
+change the answer name in *tests/tests_2.7.yaml* e.g.:
+
+.. code-block:: diff
+
+   --- a/tests/tests_2.7.yaml
+   +++ b/tests/tests_2.7.yaml
+   @@ -25,7 +25,7 @@
+        - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+        - yt/frontends/owls_subfind/tests/test_outputs.py
+   
+   -  local_owls_270:
+   +  local_owls_271:
+        - yt/frontends/owls/tests/test_outputs.py
+   
+      local_pw_270:
+
+would regenerate answers for OWLS frontend.
+
+Adding New Answer Tests
+~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to add a new set of answer tests, it is sufficient to extend the
+*answer_tests* list in *tests/tests_2.7.yaml* e.g.: 
+
+.. code-block:: diff
+
+   --- a/tests/tests_2.7.yaml
+   +++ b/tests/tests_2.7.yaml
+   @@ -60,6 +60,10 @@
+        - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+        - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
+    
+   +  local_gdf_270:
+   +    - yt/frontends/gdf/tests/test_outputs.py
+   +
+   +
+    other_tests:
+      unittests:
+

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -369,7 +369,7 @@
 
 This particular dataset has two meshes in it, both of which are made of 8-node hexes.
 yt uses a field name convention to access these different meshes in plots and data
-objects. To see all the fields found in a particlular dataset, you can do:
+objects. To see all the fields found in a particular dataset, you can do:
 
 .. code-block:: python
     
@@ -540,7 +540,7 @@
 
 * ``CDELTx``: The pixel width in along axis ``x``
 * ``CRVALx``: The coordinate value at the reference position along axis ``x``
-* ``CRPIXx``: The the reference pixel along axis ``x``
+* ``CRPIXx``: The reference pixel along axis ``x``
 * ``CTYPEx``: The projection type of axis ``x``
 * ``CUNITx``: The units of the coordinate along axis ``x``
 * ``BTYPE``: The type of the image
@@ -870,7 +870,7 @@
 ``over_refine_factor``.  They are weak proxies for each other.  The first,
 ``n_ref``, governs how many particles in an oct results in that oct being
 refined into eight child octs.  Lower values mean higher resolution; the
-default is 64.  The secon parameter, ``over_refine_factor``, governs how many
+default is 64.  The second parameter, ``over_refine_factor``, governs how many
 cells are in a given oct; the default value of 1 corresponds to 8 cells.
 The number of cells in an oct is defined by the expression
 ``2**(3*over_refine_factor)``.
@@ -1118,8 +1118,10 @@
    bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
    ds = yt.load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
 
-where in this exampe the particle position fields have been assigned. ``number_of_particles`` must be the same size as the particle
-arrays. If no particle arrays are supplied then ``number_of_particles`` is assumed to be zero. 
+where in this example the particle position fields have been assigned.
+``number_of_particles`` must be the same size as the particle arrays. If no
+particle arrays are supplied then ``number_of_particles`` is assumed to be
+zero. 
 
 .. rubric:: Caveats
 
@@ -1153,7 +1155,7 @@
    coordinates,connectivity = yt.hexahedral_connectivity(xgrid,ygrid,zgrid)
 
 will define the (x,y,z) coordinates of the hexahedral cells and
-information about that cell's neighbors such that the celll corners
+information about that cell's neighbors such that the cell corners
 will be a grid of points constructed as the Cartesian product of
 xgrid, ygrid, and zgrid.
 
@@ -1386,8 +1388,8 @@
 ---------
 
 `PyNE <http://pyne.io/>`_ is an open source nuclear engineering toolkit
-maintained by the PyNE developement team (`pyne-dev at googlegroups.com
-<pyne-dev%40googlegroups.com>`_). PyNE meshes utilize the Mesh-Oriented datABase
+maintained by the PyNE development team (pyne-dev at googlegroups.com).
+PyNE meshes utilize the Mesh-Oriented datABase
 `(MOAB) <http://trac.mcs.anl.gov/projects/ITAPS/wiki/MOAB/>`_ and can be
 Cartesian or tetrahedral. In addition to field data, pyne meshes store pyne
 Material objects which provide a rich set of capabilities for nuclear

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -176,7 +176,7 @@
 cells from the parent grid will be duplicated (appropriately) to fill the 
 covering grid.
 
-Let's say we now want to look at that entire data volume and sample it at the 
+Let's say we now want to look at that entire data volume and sample it at
 a higher resolution (i.e. level 2).  As stated above, we'll be oversampling
 under-refined regions, but that's OK.  We must also increase the resolution 
 of our output array by a factor of 2^2 in each direction to hold this new 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -141,7 +141,7 @@
   $ grep -r SlicePlot *         (or $ grin SlicePlot)
 
 This will print a number of locations in the yt source tree where ``SlicePlot``
-is mentioned.  You can now followup on this and open up the files that have
+is mentioned.  You can now follow-up on this and open up the files that have
 references to ``SlicePlot`` (particularly the one that defines SlicePlot) and
 inspect their contents for problems or clarification.
 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -175,6 +175,7 @@
 .. toctree::
    :hidden:
 
+   intro/index
    installing
    yt Quickstart <quickstart/index>
    yt3differences

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -19,7 +19,7 @@
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
   will probably want to use the bash all-in-one installation script.  This builds 
-  python, numpy, matplotlib, and yt from source to set up an isolated scientific 
+  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific 
   python environment inside of a single folder in your home directory. See
   :ref:`install-script` for more details.
 
@@ -35,9 +35,9 @@
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
   let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via linux package managers so long as you
-  have the the necessary compilers installed (e.g. the ``build-essentials``
-  package on debian and ubuntu).
+  for python environments set up via Linux package managers so long as you
+  have the necessary compilers installed (e.g. the ``build-essentials``
+  package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -199,13 +199,12 @@
 
 If you do not want to install the full anaconda python distribution, you can
 install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download a recent version of the
-``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
-system architecture. Next, run the script, e.g.:
+visit http://repo.continuum.io/miniconda/ and download ``Miniconda-latest-...`` 
+script for your platform and system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  bash Miniconda-3.3.0-Linux-x86_64.sh
+  bash Miniconda-latest-Linux-x86_64.sh
 
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
@@ -214,7 +213,28 @@
 
   conda install yt
 
-which will install yt along with all of its dependencies.
+which will install the stable branch of yt along with all of its dependencies.
+
+If you would like to install the latest development version of yt, you can download
+it from our custom anaconda channel:
+
+.. code-block:: bash
+
+  conda install -c http://use.yt/with_conda/ yt
+
+New packages for development branch are built after every pull request is
+merged. In order to make sure you are running latest version, it's recommended
+to update frequently:
+
+.. code-block:: bash
+
+  conda update -c http://use.yt/with_conda/ yt
+
+Location of our channel can be added to ``.condarc`` to avoid retyping it during
+each *conda* invocation. Please refer to `Conda Manual
+<http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
+detailed instructions.
+
 
 Obtaining Source Code
 ^^^^^^^^^^^^^^^^^^^^^
@@ -252,7 +272,7 @@
 
   git clone https://github.com/conda/conda-recipes
 
-Then navigate to the repository root and invoke `conda build`:
+Then navigate to the repository root and invoke ``conda build``:
 
 .. code-block:: bash
 
@@ -290,7 +310,7 @@
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython cython h5py nose sympy
+  $ pip install numpy matplotlib cython h5py nose sympy
 
 If you're using IPython notebooks, you can install its dependencies
 with ``pip`` as well:
@@ -366,7 +386,7 @@
   yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
-any changes from bitbucket, and then recompile yt if necessary.
+any changes from Bitbucket, and then recompile yt if necessary.
 
 .. _testing-installation:
 
@@ -397,7 +417,7 @@
 
 With the release of version 3.0 of yt, development of the legacy yt 2.x series
 has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the forseeable future.  This makes it easy to use scripts written
+series for the foreseeable future.  This makes it easy to use scripts written
 for older versions of yt without substantially updating them to support the
 new field naming or unit systems in yt version 3.
 
@@ -411,7 +431,7 @@
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
 repository, update to the desired version, and rebuild the source (some of the
-c code requires a compilation step for big changes like this):
+C code requires a compilation step for big changes like this):
 
 .. code-block:: bash
 
@@ -419,7 +439,7 @@
   hg update <desired-version>
   python setup.py develop
 
-Valid versions to jump to are described in :ref:`branches-of-yt`).
+Valid versions to jump to are described in :ref:`branches-of-yt`.
 
 You can check which version of yt you have installed by invoking ``yt version``
 at the command line.  If you encounter problems, see :ref:`update-errors`.

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/intro/index.rst
--- a/doc/source/intro/index.rst
+++ b/doc/source/intro/index.rst
@@ -49,7 +49,7 @@
 the :ref:`units system <units>` works to tag every individual field and 
 quantity with a physical unit (e.g. cm, AU, kpc, Mpc, etc.), and it describes 
 ways of analyzing multiple chronological data outputs from the same underlying 
-dataset known as :ref:`time series <time-series-analysis`.  Lastly, it includes 
+dataset known as :ref:`time series <time-series-analysis>`.  Lastly, it includes 
 information on how to enable yt to operate :ref:`in parallel over multiple 
 processors simultaneously <parallel-computation>`.
 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -472,6 +472,8 @@
    ~yt.analysis_modules.halo_analysis.halo_quantities.HaloQuantity
    ~yt.analysis_modules.halo_analysis.halo_quantities.bulk_velocity
    ~yt.analysis_modules.halo_analysis.halo_quantities.center_of_mass
+   ~yt.analysis_modules.halo_analysis.halo_recipes.HaloRecipe
+   ~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities
 
 Halo Finding
 ^^^^^^^^^^^^

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/reference/index.rst
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -14,5 +14,6 @@
    command-line
    api/api
    configuration
+   python_introduction
    field_list
    changelog

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/reference/python_introduction.rst
--- a/doc/source/reference/python_introduction.rst
+++ b/doc/source/reference/python_introduction.rst
@@ -315,7 +315,7 @@
 Let's try this out with a for loop.  First type ``for i in range(10):`` and
 press enter.  This will change the prompt to be three periods, instead of three
 greater-than signs, and you will be expected to hit the tab key to indent.
-Then type "print i", press enter, and then instead of indenting again, press
+Then type "print(i)", press enter, and then instead of indenting again, press
 enter again.  The entire entry should look like this::
 
    >>> for i in range(10):

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions.  TransferFunctionHelper is a utility class that makes it easy to visualize he probability density functions of yt fields that you might want to volume render.  This makes it easier to choose a nice transfer function that highlights interesting physical regimes.\n",
+    "Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions.  Creating a custom transfer function is a process that usually involves some trial-and-error. TransferFunctionHelper is a utility class designed to help you visualize the probability density functions of yt fields that you might want to volume render.  This makes it easier to choose a nice transfer function that highlights interesting physical regimes.\n",
     "\n",
     "First, we set up our namespace and define a convenience function to display volume renderings inline in the notebook.  Using `%matplotlib inline` makes it so matplotlib plots display inline in the notebook."
    ]
@@ -22,7 +22,6 @@
     "from IPython.core.display import Image\n",
     "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
     "from yt.visualization.volume_rendering.render_source import VolumeSource\n",
-    "from yt.visualization.volume_rendering.camera import Camera\n",
     "\n",
     "def showme(im):\n",
     "    # screen out NaNs\n",
@@ -133,8 +132,8 @@
     "tfh.set_log(True)\n",
     "tfh.build_transfer_function()\n",
     "tfh.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')\n",
-    "tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=10.0)\n",
-    "tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.)\n",
+    "tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds')\n",
+    "tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r')\n",
     "\n",
     "tfh.plot(profile_field='cell_mass')"
    ]
@@ -143,7 +142,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Finally, let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
+    "Let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
    ]
   },
   {
@@ -167,7 +166,55 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
+    "That looks okay, but the red gas (associated with temperatures between 1e6 and 1e8 K) is a bit hard to see in the image. To fix this, we can make that gas contribute a larger alpha value to the image by using the ``scale`` keyword argument in ``map_to_colormap``."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "tfh2 = TransferFunctionHelper(ds)\n",
+    "tfh2.set_field('temperature')\n",
+    "tfh2.set_bounds()\n",
+    "tfh2.set_log(True)\n",
+    "tfh2.build_transfer_function()\n",
+    "tfh2.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')\n",
+    "tfh2.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=5.0)\n",
+    "tfh2.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.0)\n",
+    "\n",
+    "tfh2.plot(profile_field='cell_mass')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the height of the red portion of the transfer function has increased by a factor of 5.0. If we use this transfer function to make the final image:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "source.set_transfer_function(tfh2.tf)\n",
+    "im3 = sc.render()\n",
+    "\n",
+    "showme(im3[:,:,:3])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The red gas is now much more prominent in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
    ]
   }
  ],

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
--- a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
+++ b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
@@ -18,7 +18,7 @@
     "import yt\n",
     "import numpy as np\n",
     "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
-    "from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource\n",
+    "from yt.visualization.volume_rendering.api import Scene, VolumeSource\n",
     "\n",
     "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
     "sc = yt.create_scene(ds)"
@@ -199,7 +199,7 @@
    },
    "outputs": [],
    "source": [
-    "cam = Camera(ds, lens_type='perspective')\n",
+    "cam = sc.add_camera(ds, lens_type='perspective')\n",
     "\n",
     "# Standing at (x=0.05, y=0.5, z=0.5), we look at the area of x>0.05 (with some open angle\n",
     "# specified by camera width) along the positive x direction.\n",
@@ -213,7 +213,6 @@
     "# The width determines the opening angle\n",
     "cam.set_width(ds.domain_width * 0.5)\n",
     "\n",
-    "sc.camera = cam\n",
     "print (sc.camera)"
    ]
   },

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -47,7 +47,7 @@
 store them in your :ref:`plugin-file` for access to them in every future yt 
 session.  The example below creates two custom colormaps, one that has
 three equally spaced bars of blue, white and red, and the other that 
-interpolates in increasing lengthed intervals from black to red, to green, 
+interpolates in intervals of increasing length from black to red, to green, 
 to blue.  These will be accessible for the rest of the yt session as 
 'french_flag' and 'weird'.  See 
 :func:`~yt.visualization.color_maps.make_colormap` and 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -16,7 +16,6 @@
    manual_plotting
    volume_rendering
    unstructured_mesh_rendering
-   hardware_volume_rendering
    sketchfab
    mapserver
    streamlines

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -415,9 +415,19 @@
 determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
 thinner.
 
+The above example all involve 8-node hexahedral mesh elements. Here is another example from
+a dataset that uses 6-node wedge elements:
+
+.. python-script::
+   
+   import yt
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+   sl = yt.SlicePlot(ds, 2, ('connect2', 'diffused'))
+   sl.save()
+
 Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
 slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
-an example using another MOOSE dataset:
+an example using another MOOSE dataset that uses triangular mesh elements:
 
 .. python-script::
 

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -80,8 +80,8 @@
 ``export_ply``, which will write to a file and optionally sample a field at
 every face or vertex, outputting a color value to the file as well.  This file
 can then be viewed in MeshLab, Blender or on the website `Sketchfab.com
-<Sketchfab.com>`_.  But if you want to view it on Sketchfab, there's an even
-easier way!
+<https://sketchfab.com>`_.  But if you want to view it on Sketchfab, there's an
+even easier way!
 
 Exporting to Sketchfab
 ----------------------

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -214,6 +214,29 @@
     # render and save
     sc.save()
 
+Here is an example using 6-node wedge elements:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+
+   # create a default scene
+   sc = yt.create_scene(ds, ('connect2', 'diffused'))
+
+   # override the default colormap
+   ms = sc.get_source(0)
+   ms.cmap = 'Eos A'
+
+   # adjust the camera position and orientation
+   cam = sc.camera
+   cam.set_position(ds.arr([1.0, -1.0, 1.0], 'code_length'))
+   cam.width = ds.arr([1.5, 1.5, 1.5], 'code_length')
+
+   # render and save
+   sc.save()
+
 Another example, this time plotting the temperature field from a 20-node hex 
 MOOSE dataset:
 
@@ -273,7 +296,7 @@
     # adjust the camera position and orientation
     cam = sc.camera
     camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
     cam.set_position(camera_position, north_vector)
     
@@ -292,7 +315,6 @@
 .. python-script::
 
     import yt
-    from yt.visualization.volume_rendering.api import Camera
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
@@ -304,15 +326,12 @@
     ms.cmap = 'Eos A'
    
     # Create a perspective Camera
-    cam = Camera(ds, lens_type='perspective')
+    cam = sc.add_camera(ds, lens_type='perspective')
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
     north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
    
-    # tell our scene to use it
-    sc.camera = cam
-   
     # increase the default resolution
     cam.resolution = (800, 800)
    
@@ -329,7 +348,7 @@
 .. python-script::
 
     import yt
-    from yt.visualization.volume_rendering.api import MeshSource, Camera, Scene
+    from yt.visualization.volume_rendering.api import MeshSource, Scene
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
@@ -337,16 +356,13 @@
     sc = Scene()
 
     # set up our Camera
-    cam = Camera(ds)
+    cam = sc.add_camera(ds)
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
                      ds.arr([0.0, -1.0, 0.0], 'dimensionless'))
     cam.set_width = ds.arr([8.0, 8.0, 8.0], 'code_length')
     cam.resolution = (800, 800)
 
-    # tell the scene to use it
-    sc.camera = cam
-
     # create two distinct MeshSources from 'connect1' and 'connect2'
     ms1 = MeshSource(ds, ('connect1', 'diffused'))
     ms2 = MeshSource(ds, ('connect2', 'diffused'))
@@ -362,7 +378,7 @@
 ^^^^^^^^^^^^^
 
 Here are a couple of example scripts that show how to create image frames that 
-can later be stiched together into a movie. In the first example, we look at a 
+can later be stitched together into a movie. In the first example, we look at a 
 single dataset at a fixed time, but we move the camera around to get a different
 vantage point. We call the rotate() method 300 times, saving a new image to the 
 disk each time.
@@ -407,7 +423,7 @@
 .. code-block:: python
 
     import yt
-    from yt.visualization.volume_rendering.api import MeshSource, Camera
+    from yt.visualization.volume_rendering.api import MeshSource
     import pylab as plt
 
     NUM_STEPS = 127
@@ -432,7 +448,7 @@
 	# set up the camera here. these values were arrived by
 	# calling pitch, yaw, and roll in the notebook until I
 	# got the angle I wanted.
-	cam = Camera(ds)
+	sc.add_camera(ds)
 	camera_position = ds.arr([0.1, 0.0, 0.1], 'code_length')
 	cam.focus = ds.domain_center
 	north_vector = ds.arr([-0.3032476, -0.71782557, 0.62671153], 'dimensionless')

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -236,12 +236,13 @@
 The :class:`~yt.visualization.volume_rendering.camera.Camera` object
 is what it sounds like, a camera within the Scene.  It possesses the 
 quantities:
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
- * ``north_vector`` - a vector defining the "up" direction in an image
- * :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
+ 
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
+* ``north_vector`` - a vector defining the "up" direction in an image
+* :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
 
 .. _camera_movement:
 
@@ -482,7 +483,7 @@
 their combination, are described below.
 
 MPI Parallelization
-+++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^
 
 Currently the volume renderer is parallelized using MPI to decompose the volume
 by attempting to split up the
@@ -516,7 +517,7 @@
 For more information about enabling parallelism, see :ref:`parallel-computation`.
 
 OpenMP Parallelization
-++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^
 
 The volume rendering is also parallelized using the OpenMP interface in Cython.
 While the MPI parallelization is done using domain decomposition, the OpenMP
@@ -532,7 +533,7 @@
 by default by modifying the environment variable OMP_NUM_THREADS. 
 
 Running in Hybrid MPI + OpenMP
-++++++++++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The two methods for volume rendering parallelization can be used together to
 leverage large supercomputing resources.  When choosing how to balance the

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -84,7 +84,7 @@
   external code**
   Mesh fields that exist on-disk in an output file can be read in using whatever
   name is used by the output file.  On-disk fields are always returned in code
-  units.  The full field name will be will be ``(code_name, field_name)``. See
+  units.  The full field name will be ``(code_name, field_name)``. See
   :ref:`field-list`.
 * **Particle fields are now more obviously different than mesh fields**
   Particle fields on-disk will also be in code units, and will be named
@@ -247,8 +247,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Wherever possible, we have attempted to replace the term "parameter file"
-(i.e., ``pf``) with the term "dataset."  In yt-3.0, all of the 
-the ``pf`` atrributes of objects are now ``ds`` or ``dataset`` attributes.
+(i.e., ``pf``) with the term "dataset."  In yt-3.0, all of
+the ``pf`` attributes of objects are now ``ds`` or ``dataset`` attributes.
 
 Hierarchy is Now Index
 ^^^^^^^^^^^^^^^^^^^^^^
@@ -262,7 +262,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Derived quantities can now be accessed via a function that hangs off of the
-``quantities`` atribute of data objects. Instead of
+``quantities`` attribute of data objects. Instead of
 ``dd.quantities['TotalMass']()``, you can now use ``dd.quantities.total_mass()``
 to do the same thing. All derived quantities can be accessed via a function that
 hangs off of the `quantities` attribute of data objects.

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,5 +1,7 @@
 #!python
-import os, re
+from __future__ import print_function
+import os
+import re
 from distutils.version import LooseVersion
 from yt.mods import *
 from yt.data_objects.data_containers import YTDataContainer
@@ -16,8 +18,8 @@
 
 try:
     import IPython
-except:
-    print 'ipython is not available. using default python interpreter.'
+except ImportError:
+    print('ipython is not available. using default python interpreter.')
     import code
     import sys
     code.interact(doc, None, namespace)
@@ -70,7 +72,7 @@
 Feel free to edit this file to customize your ipython experience.
 
 Note that as such this file does nothing, for backwards compatibility.
-Consult e.g. file 'ipy_profile_sh.py' for an example of the things 
+Consult e.g. file 'ipy_profile_sh.py' for an example of the things
 you can do here.
 
 See http://ipython.scipy.org/moin/IpythonExtensionApi for detailed
@@ -96,7 +98,7 @@
 # http://pymel.googlecode.com/svn/trunk/tools/ipymel.py
 # We'll start with some fields.
 
-import re
+
 def yt_fieldname_completer(self, event):
     """Match dictionary completions"""
     #print "python_matches", event.symbol
@@ -110,7 +112,7 @@
 
     if not m:
         raise try_next
-    
+
     expr, attr = m.group(1, 3)
     #print "COMPLETING ON ", expr, attr
     #print type(self.Completer), dir(self.Completer)
@@ -122,9 +124,9 @@
         try:
             obj = eval(expr, self.Completer.global_namespace)
         except:
-            raise IPython.ipapi.TryNext 
-        
-    if isinstance(obj, (YTDataContainer, ) ):
+            raise IPython.ipapi.TryNext
+
+    if isinstance(obj, YTDataContainer):
         #print "COMPLETING ON THIS THING"
         all_fields = [f for f in sorted(
                 obj.ds.field_list + obj.ds.derived_field_list)]
@@ -135,6 +137,6 @@
 
     raise try_next
 
-ip.set_hook('complete_command', yt_fieldname_completer , re_key = ".*" )
+ip.set_hook('complete_command', yt_fieldname_completer, re_key = ".*")
 
 ip_shell.mainloop(**kwargs)

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import hglib
 import requests
 import shutil
@@ -6,6 +7,7 @@
 from datetime import datetime
 from distutils.version import LooseVersion
 from time import strptime, mktime
+from yt.extern.six.moves import input
 
 MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
                       "yt/pullrequests/?state=MERGED")
@@ -280,17 +282,17 @@
             if commit_already_on_stable(repo_path, commits[0]) is True:
                 continue
             message = "hg graft %s\n" % commits[0]
-        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
-        print "To backport, issue the following command(s):\n"
-        print message
-        raw_input('Press any key to continue')
+        print("PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc)
+        print("To backport, issue the following command(s):\n")
+        print(message)
+        input('Press any key to continue')
 
 
 if __name__ == "__main__":
-    print ""
-    print "Gathering PR information, this may take a minute."
-    print "Don't worry, yt loves you."
-    print ""
+    print("")
+    print("Gathering PR information, this may take a minute.")
+    print("Don't worry, yt loves you.")
+    print("")
     repo_path = clone_new_repo()
     try:
         last_major_release = get_first_commit_after_last_major_release(repo_path)
@@ -308,11 +310,11 @@
         del inv_map[None]
 
         inv_map = screen_already_backported(repo_path, inv_map)
-        print "In another terminal window, navigate to the following path:"
-        print "%s" % repo_path
-        raw_input("Press any key to continue")
+        print("In another terminal window, navigate to the following path:")
+        print("%s" % repo_path)
+        input("Press any key to continue")
         backport_pr_commits(repo_path, inv_map, last_stable, prs)
-        raw_input(
+        input(
             "Now you need to push your backported changes. The temporary\n"
             "repository currently being used will be deleted as soon as you\n"
             "press any key.")

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
 from setuptools.command.build_py import build_py as _build_py
 from setupext import \
     check_for_openmp, check_for_pyembree, read_embree_location, \
-    get_mercurial_changeset_id
+    get_mercurial_changeset_id, in_conda_env
 
 if sys.version_info < (2, 7):
     print("yt currently requires Python version 2.7")
@@ -127,7 +127,9 @@
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=["m"], depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
+              libraries=["m"],
+              depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
@@ -177,7 +179,8 @@
                        "yt/utilities/lib/kdtree.h",
                        "yt/utilities/lib/fixed_interpolator.h",
                        "yt/utilities/lib/fixed_interpolator.pxd",
-                       "yt/utilities/lib/field_interpolation_tables.pxd"]),
+                       "yt/utilities/lib/field_interpolation_tables.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.element_mappings",
               ["yt/utilities/lib/element_mappings.pyx"],
               libraries=["m"], depends=["yt/utilities/lib/element_mappings.pxd"]),
@@ -254,14 +257,21 @@
     ]
 
     embree_prefix = os.path.abspath(read_embree_location())
+    embree_inc_dir = [os.path.join(embree_prefix, 'include')]
+    embree_lib_dir = [os.path.join(embree_prefix, 'lib')]
+    if in_conda_env():
+        conda_basedir = os.path.dirname(os.path.dirname(sys.executable))
+        embree_inc_dir.append(os.path.join(conda_basedir, 'include'))
+        embree_lib_dir.append(os.path.join(conda_basedir, 'lib'))
+        
     if _platform == "darwin":
         embree_lib_name = "embree.2"
     else:
         embree_lib_name = "embree"
 
     for ext in embree_extensions:
-        ext.include_dirs.append(os.path.join(embree_prefix, 'include'))
-        ext.library_dirs.append(os.path.join(embree_prefix, 'lib'))
+        ext.include_dirs += embree_inc_dir
+        ext.library_dirs += embree_lib_dir
         ext.language = "c++"
         ext.libraries += ["m", embree_lib_name]
 
@@ -352,7 +362,11 @@
                  "Operating System :: POSIX :: AIX",
                  "Operating System :: POSIX :: Linux",
                  "Programming Language :: C",
-                 "Programming Language :: Python",
+                 "Programming Language :: Python :: 2",
+                 "Programming Language :: Python :: 2.7",
+                 "Programming Language :: Python :: 3",
+                 "Programming Language :: Python :: 3.4",
+                 "Programming Language :: Python :: 3.5",
                  "Topic :: Scientific/Engineering :: Astronomy",
                  "Topic :: Scientific/Engineering :: Physics",
                  "Topic :: Scientific/Engineering :: Visualization"],

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -59,6 +59,8 @@
         return None
     return os.path.dirname(fn)
 
+def in_conda_env():
+    return any(s in sys.version for s in ("Anaconda", "Continuum"))
 
 def read_embree_location():
     '''

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -1,54 +1,100 @@
 import sys
 import os
 import yaml
-import multiprocessing as mp
+import multiprocessing
 import nose
-import glob
-from contextlib import closing
+from cStringIO import StringIO
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
 
 
-def run_job(argv):
-    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
-        cur_stderr = sys.stderr
-        sys.stderr = fstderr
-        answer = argv[0]
+class NoseWorker(multiprocessing.Process):
+
+    def __init__(self, task_queue, result_queue):
+        multiprocessing.Process.__init__(self)
+        self.task_queue = task_queue
+        self.result_queue = result_queue
+
+    def run(self):
+        proc_name = self.name
+        while True:
+            next_task = self.task_queue.get()
+            if next_task is None:
+                print("%s: Exiting" % proc_name)
+                self.task_queue.task_done()
+                break
+            print '%s: %s' % (proc_name, next_task)
+            result = next_task()
+            self.task_queue.task_done()
+            self.result_queue.put(result)
+        return
+
+class NoseTask(object):
+    def __init__(self, argv):
+        self.argv = argv
+        self.name = argv[0]
+
+    def __call__(self):
+        old_stderr = sys.stderr
+        sys.stderr = mystderr = StringIO()
         test_dir = ytcfg.get("yt", "test_data_dir")
         answers_dir = os.path.join(test_dir, "answers")
-        if not os.path.isdir(os.path.join(answers_dir, answer)):
-            nose.run(argv=argv + ['--answer-store'],
+        if '--with-answer-testing' in self.argv and \
+                not os.path.isdir(os.path.join(answers_dir, self.name)):
+            nose.run(argv=self.argv + ['--answer-store'],
                      addplugins=[AnswerTesting()], exit=False)
-        nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
-    sys.stderr = cur_stderr
+        nose.run(argv=self.argv, addplugins=[AnswerTesting()], exit=False)
+        sys.stderr = old_stderr
+        return mystderr.getvalue()
 
-if __name__ == "__main__":
+    def __str__(self):
+        return 'WILL DO self.name = %s' % self.name
+
+
+def generate_tasks_input():
     test_dir = ytcfg.get("yt", "test_data_dir")
     answers_dir = os.path.join(test_dir, "answers")
     with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
         tests = yaml.load(obj)
 
-    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+    base_argv = ['--local-dir=%s' % answers_dir, '-v',
                  '--with-answer-testing', '--answer-big-data', '--local']
-    args = [['unittests', '-v', '-s', '--nologcapture']]
-    for answer in list(tests.keys()):
+    args = []
+
+    for test in list(tests["other_tests"].keys()):
+        args.append([test] + tests["other_tests"][test])
+    for answer in list(tests["answer_tests"].keys()):
         argv = [answer]
         argv += base_argv
-        argv.append('--xunit-file=%s.xml' % answer)
         argv.append('--answer-name=%s' % answer)
-        argv += tests[answer]
+        argv += tests["answer_tests"][answer]
         args.append(argv)
-    
-    processes = [mp.Process(target=run_job, args=(args[i],))
-                 for i in range(len(args))]
-    for p in processes:
-        p.start()
-    for p in processes:
-        p.join(timeout=7200)
-        if p.is_alive():
-            p.terminate()
-            p.join(timeout=30)
-    for fname in glob.glob("*.out"):
-        with open(fname, 'r') as fin:
-            print(fin.read())
-        os.remove(fname)
+
+    args = [item + ['-s', '--nologcapture', '--xunit-file=%s.xml' % item[0]]
+            for item in args]
+    return args
+
+if __name__ == "__main__":
+    # multiprocessing.log_to_stderr(logging.DEBUG)
+    tasks = multiprocessing.JoinableQueue()
+    results = multiprocessing.Queue()
+
+    num_consumers = 6  # TODO 
+    consumers = [NoseWorker(tasks, results) for i in range(num_consumers)]
+    for w in consumers:
+        w.start()
+
+    num_jobs = 0
+    for job in generate_tasks_input():
+        tasks.put(NoseTask(job))
+        num_jobs += 1
+
+    for i in range(num_consumers):
+        tasks.put(None)
+
+    tasks.join()
+
+    while num_jobs:
+        result = results.get()
+        print(result)
+        num_jobs -= 1

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -1,51 +1,68 @@
-local_artio_270:
-  - yt/frontends/artio/tests/test_outputs.py
+answer_tests:
+  local_artio_270:
+    - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_270:
-  - yt/frontends/athena
+  local_athena_270:
+    - yt/frontends/athena
 
-local_chombo_270:
-  - yt/frontends/chombo/tests/test_outputs.py
+  local_chombo_270:
+    - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_270:
-  - yt/frontends/enzo
+  local_enzo_270:
+    - yt/frontends/enzo
 
-local_fits_270:
-  - yt/frontends/fits/tests/test_outputs.py
+  local_fits_270:
+    - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_270:
-  - yt/frontends/flash/tests/test_outputs.py
+  local_flash_270:
+    - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_270:
-  - yt/frontends/gadget/tests/test_outputs.py
+  local_gadget_270:
+    - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_270:
-  - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
-  - yt/analysis_modules/halo_finding/tests/test_rockstar.py
-  - yt/frontends/owls_subfind/tests/test_outputs.py
+  local_halos_270:
+    - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+    - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+    - yt/frontends/owls_subfind/tests/test_outputs.py
+  
+  local_owls_270:
+    - yt/frontends/owls/tests/test_outputs.py
+  
+  local_pw_270:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
+    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
+  
+  local_tipsy_270:
+    - yt/frontends/tipsy/tests/test_outputs.py
+  
+  local_varia_271:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/visualization/volume_rendering/tests/test_mesh_render.py
 
-local_owls_270:
-  - yt/frontends/owls/tests/test_outputs.py
+  local_orion_270:
+    - yt/frontends/boxlib/tests/test_orion.py
+  
+  local_ramses_270:
+    - yt/frontends/ramses/tests/test_outputs.py
+  
+  local_ytdata_270:
+    - yt/frontends/ytdata
 
-local_pw_270:
-  - yt/visualization/tests/test_plotwindow.py:test_attributes
-  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+  local_absorption_spectrum_271:
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
 
-local_tipsy_270:
-  - yt/frontends/tipsy/tests/test_outputs.py
-
-local_varia_270:
-  - yt/analysis_modules/radmc3d_export
-  - yt/frontends/moab/tests/test_c5.py
-  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
-
-local_orion_270:
-  - yt/frontends/boxlib/tests/test_orion.py
-
-local_ramses_270:
-  - yt/frontends/ramses/tests/test_outputs.py
-
-local_ytdata_270:
-  - yt/frontends/ytdata
\ No newline at end of file
+other_tests:
+  unittests:
+     - '-v'
+  cookbook:
+     - '-v'
+     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r 109872b62c3878385a5e7a79eb2c18e190284dd4 -r 203ee09e51eeffbab44fb6802c996d4ba106a264 tests/tests_3.4.yaml
--- a/tests/tests_3.4.yaml
+++ b/tests/tests_3.4.yaml
@@ -1,49 +1,57 @@
-local_artio_340:
-  - yt/frontends/artio/tests/test_outputs.py
+answer_tests:
+  local_artio_340:
+    - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_340:
-  - yt/frontends/athena
+  local_athena_340:
+    - yt/frontends/athena
 
-local_chombo_340:
-  - yt/frontends/chombo/tests/test_outputs.py
+  local_chombo_340:
+    - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_340:
-  - yt/frontends/enzo
+  local_enzo_340:
+    - yt/frontends/enzo
 
-local_fits_340:
-  - yt/frontends/fits/tests/test_outputs.py
+  local_fits_340:
+    - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_340:
-  - yt/frontends/flash/tests/test_outputs.py
+  local_flash_340:
+    - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_340:
-  - yt/frontends/gadget/tests/test_outputs.py
+  local_gadget_340:
+    - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_340:
-  - yt/frontends/owls_subfind/tests/test_outputs.py
+  local_halos_340:
+    - yt/frontends/owls_subfind/tests/test_outputs.py
 
-local_owls_340:
-  - yt/frontends/owls/tests/test_outputs.py
+  local_owls_340:
+    - yt/frontends/owls/tests/test_outputs.py
 
-local_pw_340:
-  - yt/visualization/tests/test_plotwindow.py:test_attributes
-  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+  local_pw_340:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
 
-local_tipsy_340:
-  - yt/frontends/tipsy/tests/test_outputs.py
+  local_tipsy_340:
+    - yt/frontends/tipsy/tests/test_outputs.py
 
-local_varia_340:
-  - yt/analysis_modules/radmc3d_export
-  - yt/frontends/moab/tests/test_c5.py
-  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+  local_varia_340:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
 
-local_orion_340:
-  - yt/frontends/boxlib/tests/test_orion.py
+  local_orion_340:
+    - yt/frontends/boxlib/tests/test_orion.py
 
-local_ramses_340:
-  - yt/frontends/ramses/tests/test_outputs.py
+  local_ramses_340:
+    - yt/frontends/ramses/tests/test_outputs.py
 
-local_ytdata_340:
-  - yt/frontends/ytdata
\ No newline at end of file
+  local_ytdata_340:
+    - yt/frontends/ytdata
+
+other_tests:
+  unittests:
+    - '-v'
+  cookbook:
+    - 'doc/source/cookbook/tests/test_cookbook.py'
+    - '-P'

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/483eb61849ed/
Changeset:   483eb61849ed
Branch:      yt
User:        jzuhone
Date:        2016-03-29 16:02:47+00:00
Summary:     Merge
Affected #:  42 files

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -0,0 +1,75 @@
+.. _cosmology-calculator:
+
+Cosmology Calculator
+====================
+
+The cosmology calculator can be used to calculate cosmological distances and
+times given a set of cosmological parameters.  A cosmological dataset, `ds`,
+will automatically have a cosmology calculator configured with the correct
+parameters associated with it as `ds.cosmology`.  A standalone
+:class:`~yt.utilities.cosmology.Cosmology` calculator object can be created
+in the following way:
+
+.. code-block:: python
+
+   from yt.utilities.cosmology import Cosmology
+
+   co = Cosmology(hubble_constant=0.7, omega_matter=0.3,
+                  omega_lambda=0.7, omega_curvature=0.0)
+
+Once created, various distance calculations as well as conversions between
+redshift and time are available:
+
+.. notebook-cell::
+
+   from yt.utilities.cosmology import Cosmology
+
+   co = Cosmology(hubble_constant=0.7, omega_matter=0.3,
+                  omega_lambda=0.7, omega_curvature=0.0)
+
+   # Hubble distance (c / h)
+   print("hubble distance", co.hubble_distance())
+
+   # distance from z = 0 to 0.5
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+
+   # transverse distance
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+
+   # comoving volume
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+
+   # angular diameter distance
+   print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
+
+   # angular scale
+   print("angular scale", co.angular_scale(0, 0.5).in_units("Mpc/degree"))
+
+   # luminosity distance
+   print("luminosity distance", co.luminosity_distance(0, 0.5).in_units("Mpc/h"))
+
+   # time between two redshifts
+   print("lookback time", co.lookback_time(0, 0.5).in_units("Gyr"))
+
+   # age of the Universe at a given redshift
+   print("hubble time", co.hubble_time(0).in_units("Gyr"))
+
+   # critical density
+   print("critical density", co.critical_density(0))
+
+   # Hubble parameter at a given redshift
+   print("hubble parameter", co.hubble_parameter(0).in_units("km/s/Mpc"))
+
+   # convert time after Big Bang to redshift
+   my_t = co.quan(8, "Gyr")
+   print("z from t", co.z_from_t(my_t))
+
+   # convert redshift to time after Big Bang (same as Hubble time)
+   print("t from z", co.t_from_z(0.5).in_units("Gyr"))
+
+Note, that all distances returned are comoving distances.  All of the above
+functions accept scalar values and arrays.  The helper functions, `co.quan`
+and `co.arr` exist to create unitful `YTQuantities` and `YTArray` with the
+unit registry of the cosmology calculator.  For more information on the usage
+and meaning of each calculation, consult the reference documentation at
+:ref:`cosmology-calculator-ref`.

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -12,6 +12,7 @@
 .. toctree::
    :maxdepth: 2
 
+   cosmology_calculator
    halo_analysis
    synthetic_observation
    exporting

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -791,6 +791,7 @@
    ~yt.data_objects.static_output.Dataset.box
    ~yt.funcs.deprecate
    ~yt.funcs.ensure_list
+   ~yt.funcs.enable_plugins
    ~yt.funcs.get_pbar
    ~yt.funcs.humanize_time
    ~yt.funcs.insert_ipython
@@ -861,6 +862,29 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
 
+.. _cosmology-calculator-ref:
+
+Cosmology Calculator
+--------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.utilities.cosmology.Cosmology
+   ~yt.utilities.cosmology.Cosmology.hubble_distance
+   ~yt.utilities.cosmology.Cosmology.comoving_radial_distance
+   ~yt.utilities.cosmology.Cosmology.comoving_transverse_distance
+   ~yt.utilities.cosmology.Cosmology.comoving_volume
+   ~yt.utilities.cosmology.Cosmology.angular_diameter_distance
+   ~yt.utilities.cosmology.Cosmology.angular_scale
+   ~yt.utilities.cosmology.Cosmology.luminosity_distance
+   ~yt.utilities.cosmology.Cosmology.lookback_time
+   ~yt.utilities.cosmology.Cosmology.hubble_time
+   ~yt.utilities.cosmology.Cosmology.critical_density
+   ~yt.utilities.cosmology.Cosmology.hubble_parameter
+   ~yt.utilities.cosmology.Cosmology.expansion_factor
+   ~yt.utilities.cosmology.Cosmology.z_from_t
+   ~yt.utilities.cosmology.Cosmology.t_from_z
 
 Testing Infrastructure
 ----------------------

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -36,7 +36,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     N      |   Full   |
+| Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Maestro               |   Y [#f1]_ |     N     |      Y     |   Y   |    Y     |    Y     |     N      | Partial  |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
@@ -48,7 +48,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | OWLS/EAGLE            |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Piernik               |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     N      |   Full   |
+| Piernik               |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Pluto                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -124,25 +124,22 @@
 objects, colormaps, and other code classes and objects to be used in future
 yt sessions without modifying the source code directly.  
 
+To force the plugin file to be parsed, call the function
+:func:`~yt.funcs.enable_plugins` at the top of your script.
 
 .. note::
 
-   The ``my_plugins.py`` is only parsed inside of ``yt.mods``, so in order
-   to use it, you must load yt with either: ``import yt.mods as yt``
-   or ``from yt.mods import *``.  You can tell that your
-   plugins file is being parsed by watching for a logging message when you
-   import yt.  Note that both the ``yt load`` and ``iyt`` command line entry
-   points invoke ``from yt.mods import *``, so the ``my_plugins.py`` file
-   will be parsed if you enter yt that way.
+   You can tell that your plugins file is being parsed by watching for a logging
+   message when you import yt.  Note that both the ``yt load`` and ``iyt``
+   command line entry points parse the plugin file, so the ``my_plugins.py``
+   file will be parsed if you enter yt that way.
 
 Plugin File Format
 ^^^^^^^^^^^^^^^^^^
 
-yt will look for and recognize the file ``$HOME/.yt/my_plugins`` as a plugin
+yt will look for and recognize the file ``$HOME/.yt/my_plugins.py`` as a plugin
 file, which should contain python code.  If accessing yt functions and classes
 they will not require the ``yt.`` prefix, because of how they are loaded.
-It is executed at the bottom of ``yt.mods``, and so
-it is provided with the entire namespace available in the module ``yt.mods``.
 
 For example, if I created a plugin file containing:
 
@@ -152,7 +149,7 @@
        return np.random.random(data["density"].shape)
    add_field("random", function=_myfunc, units='auto')
 
-then all of my data objects would have access to the field ``some_quantity``.
+then all of my data objects would have access to the field ``random``.
 
 You can also define other convenience functions in your plugin file.  For
 instance, you could define some variables or functions, and even import common
@@ -176,13 +173,19 @@
 
 .. code-block:: python
 
-   import yt.mods as yt
+   import yt
+   yt.enable_plugins()
 
    my_run = yt.load_run("hotgasflow/DD0040/DD0040")
 
-And because we have imported from ``yt.mods`` we have access to the
+And because we have used ``yt.enable_plugins`` we have access to the
 ``load_run`` function defined in our plugin file.
 
+Note that using the plugins file implies that your script is no longer fully
+reproducible. If you share your script with someone else and use some of the
+functionality of your plugins file, you will also need to share your plugins
+file for someone else to re-run your script properly.
+
 Adding Custom Colormaps
 ^^^^^^^^^^^^^^^^^^^^^^^
 

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -97,7 +97,8 @@
 .. code-block:: python
 
     import yt
-    yt.show_colormaps(subset=['algae', 'kamae', 'spectral'], 
+    yt.show_colormaps(subset=['algae', 'kamae', 'spectral',
+                              'arbre', 'dusk', 'octarine', 'kelp'], 
                       filename="yt_native.png")
 
 Applying a Colormap to your Rendering

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -14,7 +14,7 @@
 
 .. code-block:: bash
 
-    conda install -c http://use.yt/with_conda/ yt=3.3_dev
+    conda install -c http://use.yt/with_conda/ yt
 
 If you want to install from source, you can use the ``get_yt.sh`` script.
 Be sure to set the INST_YT_SOURCE and INST_UNSTRUCTURED flags to 1 at the 
@@ -73,7 +73,13 @@
 
 as usual. Finally, if you create a file called embree.cfg in the yt-hg directory with
 the location of the embree installation, the setup script will find this and use it, 
-provided EMBREE_DIR is not set. We recommend one of the later two methods, especially
+provided EMBREE_DIR is not set. An example embree.cfg file could look like this:
+
+.. code-block:: bash
+
+   /opt/local/
+
+We recommend one of the latter two methods, especially
 if you plan on re-compiling the cython extensions regularly. Note that none of this is
 necessary if you installed embree into a location that is in your default path, such
 as /usr/local.

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -76,17 +76,58 @@
     '''
 
     rd = os.environ.get('EMBREE_DIR')
-    if rd is not None:
-        return rd
-    print("EMBREE_DIR not set. Attempting to read embree.cfg")
+    if rd is None:
+        try:
+            rd = open("embree.cfg").read().strip()
+        except IOError:
+            rd = '/usr/local'
+
+    fail_msg = ("Pyembree is installed, but I could not compile Embree test code. \n"
+               "I attempted to find Embree headers in %s. \n"
+               "If this is not correct, please set your correct embree location \n"
+               "using EMBREE_DIR environment variable or your embree.cfg file. \n"
+               "Please see http://yt-project.org/docs/dev/visualizing/unstructured_mesh_rendering.html "
+                "for more information." % rd)
+
+    # Create a temporary directory
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+
     try:
-        rd = open("embree.cfg").read().strip()
-        return rd
-    except IOError:
-        print("Reading Embree location from embree.cfg failed.")
-        print("If compilation fails, please place the base directory")
-        print("of your Embree install in embree.cfg and restart.")
-        return '/usr/local'
+        os.chdir(tmpdir)
+
+        # Get compiler invocation
+        compiler = os.getenv('CXX', 'c++')
+        compiler = compiler.split(' ')
+
+        # Attempt to compile a test script.
+        filename = r'test.cpp'
+        file = open(filename, 'wt', 1)
+        file.write(
+            '#include "embree2/rtcore.h"\n'
+            'int main() {\n'
+            'return 0;\n'
+            '}'
+        )
+        file.flush()
+        with open(os.devnull, 'w') as fnull:
+            exit_code = subprocess.call(compiler + ['-I%s/include/' % rd, filename],
+                             stdout=fnull, stderr=fnull)
+
+        # Clean up
+        file.close()
+
+    except OSError:
+        print(fail_msg)
+
+    finally:
+        os.chdir(curdir)
+        shutil.rmtree(tmpdir)
+
+    if exit_code != 0:
+        print(fail_msg)
+
+    return rd
 
 
 def get_mercurial_changeset_id(target_dir):

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -20,6 +20,9 @@
   local_gadget_270:
     - yt/frontends/gadget/tests/test_outputs.py
 
+  local_gdf_270:
+    - yt/frontends/gdf/tests/test_outputs.py
+
   local_halos_270:
     - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
     - yt/analysis_modules/halo_finding/tests/test_rockstar.py
@@ -28,7 +31,7 @@
   local_owls_270:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_270:
+  local_pw_271:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -39,7 +42,7 @@
   local_tipsy_270:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_271:
+  local_varia_273:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -17,6 +17,8 @@
 import numpy as np
 import os
 
+from yt.config import \
+    ytcfg
 from yt.funcs import \
     mylog, \
     only_on_root
@@ -233,7 +235,7 @@
                            weight_field=None, photon_field=False,
                            save_stack=True, save_final_image=True,
                            save_slice_images=False,
-                           cmap_name="algae",
+                           cmap_name=None,
                            njobs=1, dynamic=False):
         r"""Create projections for light cone, then add them together.
 
@@ -266,7 +268,7 @@
             Default: False.
         cmap_name : string
             color map for images.
-            Default: "algae".
+            Default: your default colormap.
         njobs : int
             The number of parallel jobs over which the light cone projection
             will be split.  Choose -1 for one processor per individual
@@ -279,6 +281,9 @@
 
         """
 
+        if cmap_name is None:
+            cmap_name = ytcfg.get("yt", "default_colormap")
+
         if isinstance(field_of_view, tuple) and len(field_of_view) == 2:
             field_of_view = self.simulation.quan(field_of_view[0],
                                                  field_of_view[1])

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -18,6 +18,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from yt.config import \
+    ytcfg
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
 from yt.funcs import fix_axis, get_pbar
 from yt.visualization.volume_rendering.off_axis_projection import \
@@ -391,7 +393,7 @@
         fib.writeto(filename, clobber=clobber)
 
     @parallel_root_only
-    def write_png(self, filename_prefix, cmap_name="algae",
+    def write_png(self, filename_prefix, cmap_name=None,
                   axes_units="kpc", log_fields=None):
         r""" Export images to PNG files. Writes the SZ distortion in all
         specified frequencies as well as the mass-weighted temperature and the
@@ -406,6 +408,9 @@
         --------
         >>> szprj.write_png("SZsloshing")
         """
+        if cmap_name is None:
+            cmap_name = ytcfg.get("yt", "default_colormap")
+        
         import matplotlib
         matplotlib.use('Agg')
         import matplotlib.pyplot as plt

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -61,6 +61,7 @@
     ignore_invalid_unit_operation_errors = 'False',
     chunk_size = '1000',
     xray_data_dir = '/does/not/exist',
+    default_colormap = 'arbre',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -40,6 +40,8 @@
     YTParticleDepositionNotImplemented, \
     YTNoAPIKey, \
     YTTooManyVertices
+from yt.fields.field_exceptions import \
+    NeedsGridType
 from yt.utilities.lib.quad_tree import \
     QuadTree
 from yt.utilities.lib.interpolators import \
@@ -492,6 +494,8 @@
         Number of cells along each axis of resulting covering_grid
     fields : array_like, optional
         A list of fields that you'd like pre-generated for your object
+    num_ghost_zones : integer, optional
+        The number of padding ghost zones used when accessing fields.
 
     Examples
     --------
@@ -609,7 +613,16 @@
         fields_to_get = [f for f in fields if f not in self.field_data]
         fields_to_get = self._identify_dependencies(fields_to_get)
         if len(fields_to_get) == 0: return
-        fill, gen, part, alias = self._split_fields(fields_to_get)
+        try:
+            fill, gen, part, alias = self._split_fields(fields_to_get)
+        except NeedsGridType:
+            if self._num_ghost_zones == 0:
+                raise RuntimeError(
+                    "Attempting to access a field that needs ghost zones, but "
+                    "num_ghost_zones = %s. You should create the covering grid "
+                    "with nonzero num_ghost_zones." % self._num_ghost_zones)
+            else:
+                raise
         if len(part) > 0: self._fill_particles(part)
         if len(fill) > 0: self._fill_fields(fill)
         for a, f in sorted(alias.items()):
@@ -1212,7 +1225,7 @@
         return vv
 
     def export_obj(self, filename, transparency = 1.0, dist_fac = None,
-                   color_field = None, emit_field = None, color_map = "algae",
+                   color_field = None, emit_field = None, color_map = None,
                    color_log = True, emit_log = True, plot_index = None,
                    color_field_max = None, color_field_min = None,
                    emit_field_max = None, emit_field_min = None):
@@ -1292,6 +1305,8 @@
         ...                      dist_fac = distf, plot_index = i)
 
         """
+        if color_map is None:
+            color_map = ytcfg.get("yt", "default_colormap")
         if self.vertices is None:
             if color_field is not None:
                 self.get_data(color_field,"face")
@@ -1366,10 +1381,12 @@
 
     @parallel_root_only
     def _export_obj(self, filename, transparency, dist_fac = None,
-                    color_field = None, emit_field = None, color_map = "algae",
+                    color_field = None, emit_field = None, color_map = None,
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
+        if color_map is None:
+            color_map = ytcfg.get("yt", "default_colormap")
         if plot_index is None:
             plot_index = 0
         if isinstance(filename, io.IOBase):
@@ -1460,7 +1477,7 @@
 
 
     def export_blender(self,  transparency = 1.0, dist_fac = None,
-                   color_field = None, emit_field = None, color_map = "algae",
+                   color_field = None, emit_field = None, color_map = None,
                    color_log = True, emit_log = True, plot_index = None,
                    color_field_max = None, color_field_min = None,
                    emit_field_max = None, emit_field_min = None):
@@ -1540,6 +1557,8 @@
         ...                      dist_fac = distf, plot_index = i)
 
         """
+        if color_map is None:
+            color_map = ytcfg.get("yt", "default_colormap")
         if self.vertices is None:
             if color_field is not None:
                 self.get_data(color_field,"face")
@@ -1559,10 +1578,12 @@
         return fullverts, colors, alpha, emisses, colorindex
 
     def _export_blender(self, transparency, dist_fac = None,
-                    color_field = None, emit_field = None, color_map = "algae",
+                    color_field = None, emit_field = None, color_map = None,
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
+        if color_map is None:
+            color_map = ytcfg.get("yt", "default_colormap")
         if plot_index is None:
             plot_index = 0
         ftype = [("cind", "uint8"), ("emit", "float")]
@@ -1607,7 +1628,7 @@
 
 
     def export_ply(self, filename, bounds = None, color_field = None,
-                   color_map = "algae", color_log = True, sample_type = "face",
+                   color_map = None, color_log = True, sample_type = "face",
                    no_ghost=False):
         r"""This exports the surface to the PLY format, suitable for visualization
         in many different programs (e.g., MeshLab).
@@ -1639,6 +1660,8 @@
         ...            sp.center[i] + 5.0*kpc) for i in range(3)]
         >>> surf.export_ply("my_galaxy.ply", bounds = bounds)
         """
+        if color_map is None:
+            color_map = ytcfg.get("yt", "default_colormap")
         if self.vertices is None:
             self.get_data(color_field, sample_type, no_ghost=no_ghost)
         elif color_field is not None:
@@ -1663,7 +1686,9 @@
 
     @parallel_root_only
     def _export_ply(self, filename, bounds = None, color_field = None,
-                   color_map = "algae", color_log = True, sample_type = "face"):
+                   color_map = None, color_log = True, sample_type = "face"):
+        if color_map is None:
+            color_map = ytcfg.get("yt", "default_colormap")
         if hasattr(filename, 'read'):
             f = filename
         else:
@@ -1727,7 +1752,7 @@
             f.close()
 
     def export_sketchfab(self, title, description, api_key = None,
-                            color_field = None, color_map = "algae",
+                            color_field = None, color_map = None,
                             color_log = True, bounds = None, no_ghost = False):
         r"""This exports Surfaces to SketchFab.com, where they can be viewed
         interactively in a web browser.
@@ -1784,6 +1809,8 @@
         ...     bounds = bounds)
         ...
         """
+        if color_map is None:
+            color_map = ytcfg.get("yt", "default_colormap")
         api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
         if api_key in (None, "None"):
             raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1005,9 +1005,9 @@
             # these tests are really insufficient as a field type may be valid, and the
             # field name may be valid, but not the combination (field type, field name)
             if finfo.particle_type and ftype not in self.ds.particle_types:
-                raise YTFieldTypeNotFound(ftype)
+                raise YTFieldTypeNotFound(ftype, ds=self.ds)
             elif not finfo.particle_type and ftype not in self.ds.fluid_types:
-                raise YTFieldTypeNotFound(ftype)
+                raise YTFieldTypeNotFound(ftype, ds=self.ds)
             explicit_fields.append((ftype, fname))
         return explicit_fields
 
@@ -1439,7 +1439,7 @@
             height = width
         elif iterable(height):
             h, u = height
-            height = self.ds.quan(w, input_units = u)
+            height = self.ds.quan(h, input_units = u)
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import FixedResolutionBuffer

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/data_objects/image_array.py
--- a/yt/data_objects/image_array.py
+++ b/yt/data_objects/image_array.py
@@ -13,6 +13,8 @@
 
 import warnings
 import numpy as np
+from yt.config import \
+    ytcfg
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.units.yt_array import YTArray
 
@@ -307,7 +309,7 @@
             return write_bitmap(out.swapaxes(0, 1), filename)
 
     def write_image(self, filename, color_bounds=None, channel=None,
-                    cmap_name="algae", func=lambda x: x):
+                    cmap_name=None, func=lambda x: x):
         r"""Writes a single channel of the ImageArray to a png file.
 
         Parameters
@@ -348,6 +350,8 @@
         >>> im_arr.write_image('test_ImageArray.png')
 
         """
+        if cmap_name is None:
+            cmap_name = ytcfg.get("yt", "default_colormap")
         if filename[-4:] != '.png':
             filename += '.png'
 

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -92,8 +92,8 @@
     coords : tuple of floats
         The (plane_x, plane_y) coordinates at which to cast the ray.  Note
         that this is in the plane coordinates: so if you are casting along
-        x, this will be (y,z).  If you are casting along y, this will be
-        (x,z).  If you are casting along z, this will be (x,y).
+        x, this will be (y, z).  If you are casting along y, this will be
+        (z, x).  If you are casting along z, this will be (x, y).
     ds: Dataset, optional
         An optional dataset to use rather than self.ds
     field_parameters : dictionary

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -523,7 +523,9 @@
         fields = [ (union.name, field) for field in fields]
         new_fields = [_ for _ in fields if _ not in self.field_list]
         self.field_list.extend(new_fields)
-        self.field_info.field_list.extend(new_fields)
+        new_field_info_fields = [
+            _ for _ in fields if _ not in self.field_info.field_list]
+        self.field_info.field_list.extend(new_field_info_fields)
         self.index.field_list = sorted(self.field_list)
         # Give ourselves a chance to add them here, first, then...
         # ...if we can't find them, we set them up as defaults.

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -123,6 +123,10 @@
         file provided to the loop.
     setup_function : callable, accepts a ds
         This function will be called whenever a dataset is loaded.
+    mixed_dataset_types : True or False, default False
+        Set to True if the DatasetSeries will load different dataset types, set
+        to False if loading dataset of a single type as this will result in a
+        considerable speed up from not having to figure out the dataset type.
 
     Examples
     --------
@@ -154,8 +158,9 @@
         return ret
 
     def __init__(self, outputs, parallel = True, setup_function = None,
-                 **kwargs):
+                 mixed_dataset_types = False, **kwargs):
         # This is needed to properly set _pre_outputs for Simulation subclasses.
+        self._mixed_dataset_types = mixed_dataset_types
         if iterable(outputs) and not isinstance(outputs, string_types):
             self._pre_outputs = outputs[:]
         self.tasks = AnalysisTaskProxy(self)
@@ -173,7 +178,7 @@
         # We can make this fancier, but this works
         for o in self._pre_outputs:
             if isinstance(o, string_types):
-                ds = load(o, **self.kwargs)
+                ds = self._load(o, **self.kwargs)
                 self._setup_function(ds)
                 yield ds
             else:
@@ -187,7 +192,7 @@
             return DatasetSeries(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
         if isinstance(o, string_types):
-            o = load(o, **self.kwargs)
+            o = self._load(o, **self.kwargs)
             self._setup_function(o)
         return o
 
@@ -278,7 +283,7 @@
                 sto, output = output
 
             if isinstance(output, string_types):
-                ds = load(output, **self.kwargs)
+                ds = self._load(output, **self.kwargs)
                 self._setup_function(ds)
             else:
                 ds = output
@@ -384,6 +389,16 @@
         obj = cls(filenames, parallel = parallel)
         return obj
 
+    _dataset_cls = None
+    def _load(self, output_fn, **kwargs):
+        if self._dataset_cls is not None:
+            return self._dataset_cls(output_fn, **kwargs)
+        elif self._mixed_dataset_types:
+            return load(output_fn, **kwargs)
+        ds = load(output_fn, **kwargs)
+        self._dataset_cls = ds.__class__
+        return ds
+
 class TimeSeriesQuantitiesContainer(object):
     def __init__(self, data_object, quantities):
         self.data_object = data_object

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -718,12 +718,13 @@
     cdef np.int64_t **pcount
     cdef float **root_mesh_data
     cdef np.int64_t nvars[2]
+    cdef int cache_root_mesh
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
                  domain_right_edge,
                  artio_fileset artio_handle,
-                 sfc_start, sfc_end):
+                 sfc_start, sfc_end, int cache_root_mesh = 0):
         cdef int i
         cdef np.int64_t sfc
         self.sfc_start = sfc_start
@@ -735,6 +736,7 @@
         self.oct_count = None
         self.root_mesh_data = NULL
         self.pcount = NULL
+        self.cache_root_mesh = cache_root_mesh
 
         if artio_handle.has_particles:
             self.pcount = <np.int64_t **> malloc(sizeof(np.int64_t*)
@@ -789,10 +791,11 @@
         cdef float *grid_variables = <float *>malloc(
             ngv * sizeof(float))
         self.octree_handler = octree = ARTIOOctreeContainer(self)
-        self.root_mesh_data = <float **>malloc(sizeof(float *) * ngv)
-        for i in range(ngv):
-            self.root_mesh_data[i] = <float *>malloc(sizeof(float) * \
-                (self.sfc_end - self.sfc_start + 1))
+        if self.cache_root_mesh == 1:
+            self.root_mesh_data = <float **>malloc(sizeof(float *) * ngv)
+            for i in range(ngv):
+                self.root_mesh_data[i] = <float *>malloc(sizeof(float) * \
+                    (self.sfc_end - self.sfc_start + 1))
         # We want to pre-allocate an array of root pointers.  In the future,
         # this will be pre-determined by the ARTIO library.  However, because
         # realloc plays havoc with our tree searching, we can't utilize an
@@ -808,7 +811,7 @@
                 sfc, dpos, grid_variables, &num_oct_levels,
                 num_octs_per_level)
             check_artio_status(status)
-            for i in range(ngv):
+            for i in range(ngv * self.cache_root_mesh):
                 self.root_mesh_data[i][sfc - self.sfc_start] = \
                     grid_variables[i]
             if num_oct_levels > 0:
@@ -823,7 +826,6 @@
             check_artio_status(status)
         status = artio_grid_clear_sfc_cache(self.handle)
         check_artio_status(status)
-
         if self.artio_handle.has_particles:
             num_particles_per_species =  <int *>malloc(
                     sizeof(int)*num_species)
@@ -1505,10 +1507,15 @@
         cdef np.int64_t sfc, num_cells, sfci = -1
         cdef np.float64_t val
         cdef double dpos[3]
+        max_level = self.artio_handle.max_level
+        cdef int *num_octs_per_level = <int *>malloc(
+            (max_level + 1)*sizeof(int))
         # We duplicate some of the grid_variables stuff here so that we can
         # potentially release the GIL
         nf = len(field_indices)
         ngv = self.artio_handle.num_grid_variables
+        cdef float *grid_variables = <float *>malloc(
+            ngv * sizeof(float))
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
         num_cells = self._last_mask_sum
@@ -1528,17 +1535,39 @@
         # location based on the file index.
         cdef int filled = 0
         cdef float **mesh_data = self.range_handler.root_mesh_data
-        for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
-            sfci += 1
-            if mask[sfci] == 0: continue
-            for i in range(nf):
-                field_vals[i][filled] = mesh_data[field_ind[i]][
-                    sfc - self.sfc_start]
-            filled += 1
+        if mesh_data == NULL:
+            status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
+                                                self.sfc_end)
+            check_artio_status(status)
+            for sfc in range(self.sfc_start, self.sfc_end + 1):
+                if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+                sfci += 1
+                if mask[sfci] == 0: continue
+                status = artio_grid_read_root_cell_begin( self.handle,
+                    sfc, dpos, grid_variables, &num_oct_levels,
+                    num_octs_per_level)
+                check_artio_status(status)
+                for i in range(nf):
+                    field_vals[i][filled] = grid_variables[field_ind[i]]
+                filled += 1
+                status = artio_grid_read_root_cell_end(self.handle)
+                check_artio_status(status)
+            status = artio_grid_clear_sfc_cache(self.handle)
+            check_artio_status(status)
+        else:
+            for sfc in range(self.sfc_start, self.sfc_end + 1):
+                if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+                sfci += 1
+                if mask[sfci] == 0: continue
+                for i in range(nf):
+                    field_vals[i][filled] = mesh_data[field_ind[i]][
+                        sfc - self.sfc_start]
+                filled += 1
         # Now we have all our sources.
         free(field_ind)
         free(field_vals)
+        free(grid_variables)
+        free(num_octs_per_level)
         return tr
 
     @cython.boundscheck(False)

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -164,6 +164,7 @@
         self.directory = os.path.dirname(self.index_filename)
 
         self.max_level = ds.max_level
+        self.range_handlers = {}
         self.float_type = np.float64
         super(ARTIOIndex, self).__init__(ds, dataset_type)
 
@@ -251,11 +252,15 @@
             #v = np.array(list_sfc_ranges)
             #list_sfc_ranges = [ (v.min(), v.max()) ]
             for (start, end) in list_sfc_ranges:
-                range_handler = ARTIOSFCRangeHandler(
-                    self.ds.domain_dimensions,
-                    self.ds.domain_left_edge, self.ds.domain_right_edge,
-                    self.ds._handle, start, end)
-                range_handler.construct_mesh()
+                if (start, end) in self.range_handlers.keys():
+                    range_handler = self.range_handlers[(start, end)]
+                else:
+                    range_handler = ARTIOSFCRangeHandler(
+                        self.ds.domain_dimensions,
+                        self.ds.domain_left_edge, self.ds.domain_right_edge,
+                        self.ds._handle, start, end)
+                    range_handler.construct_mesh()
+                    self.range_handlers[(start, end)] = range_handler
                 if nz != 2:
                     ci.append(ARTIORootMeshSubset(base_region, start, end,
                                 range_handler.root_mesh_handler, self.ds))

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/frontends/gdf/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/gdf/tests/test_outputs.py
@@ -0,0 +1,47 @@
+"""
+GDF frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.gdf.api import GDFDataset
+
+_fields = ("density", "velocity_x")
+
+sedov = "sedov/sedov_tst_0004.h5"
+
+@requires_ds(sedov)
+def test_sedov_tunnel():
+    ds = data_dir_load(sedov)
+    yield assert_equal, str(ds), "sedov_tst_0004"
+    for test in small_patch_amr(ds, _fields):
+        test_sedov_tunnel.__name__ = test.description
+        yield test
+
+
+@requires_file(sedov)
+def test_GDFDataset():
+    assert isinstance(data_dir_load(sedov), GDFDataset)
+
+
+@requires_file(sedov)
+def test_units_override():
+    for test in units_override_check(sedov):
+        yield test

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -838,6 +838,17 @@
     return _func
 
 def enable_plugins():
+    """Forces the plugins file to be parsed.
+
+    This plugin file is a means of creating custom fields, quantities,
+    data objects, colormaps, and other code classes and objects to be used
+    in yt scripts without modifying the yt source directly.
+
+    The file must be located at ``$HOME/.yt/my_plugins.py``.
+
+    Warning: when you use this function, your script will only be reproducible
+    if you also provide the ``my_plugins.py`` file.
+    """
     import yt
     from yt.fields.my_plugin_fields import my_plugins_fields
     from yt.config import ytcfg

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -77,6 +77,7 @@
         self._initialize_indices()
         self.oct_handler.finalize()
         self.max_level = self.oct_handler.max_level
+        self.dataset.max_level = self.max_level
         tot = sum(self.oct_handler.recursively_count().values())
         only_on_root(mylog.info, "Identified %0.3e octs", tot)
 

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -317,7 +317,75 @@
     return ds
 
 
+def fake_vr_orientation_test_ds(N = 96):
+    """
+    create a toy dataset that puts a sphere at (0,0,0), a single cube
+    on +x, two cubes on +y, and three cubes on +z in a domain from
+    [-1,1]**3.  The lower planes (x = -1, y = -1, z = -1) are also
+    given non-zero values.
+
+    This dataset allows you to easily explore orientations and
+    handiness in VR and other renderings
+
+    """
+    from yt.frontends.stream.api import load_uniform_grid
+
+    xmin = ymin = zmin = -1.0
+    xmax = ymax = zmax = 1.0
+
+    dcoord = (xmax - xmin)/N
+
+    arr = np.zeros((N,N,N), dtype=np.float64)
+    arr[:,:,:] = 1.e-4
+
+    bbox = np.array([ [xmin, xmax], [ymin, ymax], [zmin, zmax] ])
+
+    # coordinates -- in the notation data[i, j, k]
+    x = (np.arange(N) + 0.5)*dcoord + xmin
+    y = (np.arange(N) + 0.5)*dcoord + ymin
+    z = (np.arange(N) + 0.5)*dcoord + zmin
+
+    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
+
+    # sphere at the origin
+    c = np.array( [0.5*(xmin + xmax), 0.5*(ymin + ymax), 0.5*(zmin + zmax) ] )
+    r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
+    arr[r < 0.05] = 1.0
+
+    arr[abs(x3d - xmin) < 2*dcoord] = 0.3
+    arr[abs(y3d - ymin) < 2*dcoord] = 0.3
+    arr[abs(z3d - zmin) < 2*dcoord] = 0.3
+
+    # single cube on +x
+    xc = 0.75
+    dx = 0.05
+    idx = np.logical_and(np.logical_and(x3d > xc-dx, x3d < xc+dx),
+                         np.logical_and(np.logical_and(y3d > -dx, y3d < dx),
+                                        np.logical_and(z3d > -dx, z3d < dx)) )
+    arr[idx] = 1.0
+
+    # two cubes on +y
+    dy = 0.05
+    for yc in [0.65, 0.85]:
+        idx = np.logical_and(np.logical_and(y3d > yc-dy, y3d < yc+dy),
+                             np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
+                                            np.logical_and(z3d > -dy, z3d < dy)) )
+        arr[idx] = 0.8
+
+    # three cubes on +z
+    dz = 0.05
+    for zc in [0.5, 0.7, 0.9]:
+        idx = np.logical_and(np.logical_and(z3d > zc-dz, z3d < zc+dz),
+                             np.logical_and(np.logical_and(x3d > -dz, x3d < dz),
+                                            np.logical_and(y3d > -dz, y3d < dz)) )
+        arr[idx] = 0.6
+
+    data = dict(density = (arr, "g/cm**3"))
+    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
+    return ds
+
 def expand_keywords(keywords, full=False):
+
     """
     expand_keywords is a means for testing all possible keyword
     arguments in the nosetests.  Simply pass it a dictionary of all the

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -264,14 +264,13 @@
     path = ytcfg.get("yt", "test_data_dir")
     if not os.path.isdir(path):
         return False
-    with temp_cwd(path):
-        if file_check:
-            return os.path.isfile(ds_fn) and \
-                AnswerTestingTest.result_storage is not None
-        try:
-            load(ds_fn)
-        except YTOutputNotIdentified:
-            return False
+    if file_check:
+        return os.path.isfile(os.path.join(path, ds_fn)) and \
+            AnswerTestingTest.result_storage is not None
+    try:
+        load(ds_fn)
+    except YTOutputNotIdentified:
+        return False
     return AnswerTestingTest.result_storage is not None
 
 def can_run_sim(sim_fn, sim_type, file_check = False):
@@ -280,14 +279,13 @@
     path = ytcfg.get("yt", "test_data_dir")
     if not os.path.isdir(path):
         return False
-    with temp_cwd(path):
-        if file_check:
-            return os.path.isfile(sim_fn) and \
-                AnswerTestingTest.result_storage is not None
-        try:
-            simulation(sim_fn, sim_type)
-        except YTOutputNotIdentified:
-            return False
+    if file_check:
+        return os.path.isfile(os.path.join(path, sim_fn)) and \
+            AnswerTestingTest.result_storage is not None
+    try:
+        simulation(sim_fn, sim_type)
+    except YTOutputNotIdentified:
+        return False
     return AnswerTestingTest.result_storage is not None
 
 def data_dir_load(ds_fn, cls = None, args = None, kwargs = None):
@@ -297,13 +295,12 @@
     if isinstance(ds_fn, Dataset): return ds_fn
     if not os.path.isdir(path):
         return False
-    with temp_cwd(path):
-        if cls is None:
-            ds = load(ds_fn, *args, **kwargs)
-        else:
-            ds = cls(ds_fn, *args, **kwargs)
-        ds.index
-        return ds
+    if cls is None:
+        ds = load(ds_fn, *args, **kwargs)
+    else:
+        ds = cls(os.path.join(path, ds_fn), *args, **kwargs)
+    ds.index
+    return ds
 
 def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
                  find_outputs=False):
@@ -311,9 +308,8 @@
         raise IOError
     if os.path.exists(sim_fn) or not path:
         path = "."
-    with temp_cwd(path):
-        return simulation(sim_fn, sim_type,
-                          find_outputs=find_outputs)
+    return simulation(os.path.join(path, sim_fn), sim_type,
+                      find_outputs=find_outputs)
 
 class AnswerTestingTest(object):
     reference_storage = None

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -50,6 +50,8 @@
 if ytcfg.getboolean("yt","loadfieldplugins"):
     enable_plugins()
 
+_default_colormap = ytcfg.get("yt", "default_colormap")
+
 def _fix_ds(arg):
     if os.path.isdir("%s" % arg) and \
         os.path.exists("%s/%s" % (arg,arg)):
@@ -160,7 +162,7 @@
                    help="Field to weight projections with"),
     cmap    = dict(longname="--colormap",
                    action="store", type=str,
-                   dest="cmap", default="algae",
+                   dest="cmap", default=_default_colormap,
                    help="Colormap name"),
     zlim    = dict(short="-z", longname="--zlim",
                    action="store", type=float,

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -4,7 +4,6 @@
 and featuring time and redshift conversion functions from Enzo.
 
 """
-from __future__ import print_function
 
 #-----------------------------------------------------------------------------
 # Copyright (c) 2013-2014, yt Development Team.
@@ -58,7 +57,7 @@
 
     >>> from yt.utilities.cosmology import Cosmology
     >>> co = Cosmology()
-    >>> print co.hubble_time(0.0).in_units("Gyr")
+    >>> print(co.hubble_time(0.0).in_units("Gyr"))
     
     """
     def __init__(self, hubble_constant = 0.71,
@@ -104,8 +103,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.comoving_radial_distance(0., 1.).in_units("Mpccm")
+        >>> print(co.comoving_radial_distance(0., 1.).in_units("Mpccm"))
         
         """
         return (self.hubble_distance() *
@@ -127,8 +127,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.comoving_transverse_distance(0., 1.).in_units("Mpccm")
+        >>> print(co.comoving_transverse_distance(0., 1.).in_units("Mpccm"))
         
         """
         if (self.omega_curvature > 0):
@@ -161,8 +162,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.comoving_volume(0., 1.).in_units("Gpccm**3")
+        >>> print(co.comoving_volume(0., 1.).in_units("Gpccm**3"))
 
         """
         if (self.omega_curvature > 0):
@@ -209,8 +211,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.angular_diameter_distance(0., 1.).in_units("Mpc")
+        >>> print(co.angular_diameter_distance(0., 1.).in_units("Mpc"))
         
         """
         
@@ -232,8 +235,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.angular_scale(0., 1.).in_units("kpc / arcsec")
+        >>> print(co.angular_scale(0., 1.).in_units("kpc / arcsec"))
         
         """
 
@@ -256,8 +260,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.luminosity_distance(0., 1.).in_units("Mpc")
+        >>> print(co.luminosity_distance(0., 1.).in_units("Mpc"))
         
         """
 
@@ -279,8 +284,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.lookback_time(0., 1.).in_units("Gyr")
+        >>> print(co.lookback_time(0., 1.).in_units("Gyr"))
 
         """
         return (trapzint(self.age_integrand, z_i, z_f) / \
@@ -301,8 +307,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.hubble_time(0.).in_units("Gyr")
+        >>> print(co.hubble_time(0.).in_units("Gyr"))
 
         See Also
         --------
@@ -326,9 +333,10 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.critical_density(0.).in_units("g/cm**3")
-        >>> print co.critical_density(0).in_units("Msun/Mpc**3")
+        >>> print(co.critical_density(0.).in_units("g/cm**3"))
+        >>> print(co.critical_density(0).in_units("Msun/Mpc**3"))
         
         """
         return (3.0 / 8.0 / np.pi * 
@@ -348,8 +356,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.hubble_parameter(1.0).in_units("km/s/Mpc")
+        >>> print(co.hubble_parameter(1.0).in_units("km/s/Mpc"))
 
         """
         return self.hubble_constant.in_base(self.unit_system) * self.expansion_factor(z)
@@ -393,8 +402,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.z_from_t(4.e17)
+        >>> print(co.z_from_t(4.e17))
 
         """
 
@@ -484,8 +494,9 @@
         Examples
         --------
 
+        >>> from yt.utilities.cosmology import Cosmology
         >>> co = Cosmology()
-        >>> print co.t_from_z(0.).in_units("Gyr")
+        >>> print(co.t_from_z(0.).in_units("Gyr"))
 
         See Also
         --------

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -75,11 +75,18 @@
         return "Could field '%s' in %s could not be generated." % (self.fname, self.ds)
 
 class YTFieldTypeNotFound(YTException):
-    def __init__(self, fname):
-        self.fname = fname
+    def __init__(self, ftype, ds=None):
+        self.ftype = ftype
+        self.ds = ds
 
     def __str__(self):
-        return "Could not find field '%s'." % (self.fname)
+        if self.ds is not None and \
+          self.ftype in self.ds.particle_types:
+            return ("Could not find field type '%s'.  " +
+                    "This field type is a known particle type for this dataset.  " +
+                    "Try adding this field with particle_type=True.") % self.ftype
+        else:
+            return "Could not find field type '%s'." % (self.ftype)
 
 class YTSimulationNotIdentified(YTException):
     def __init__(self, sim_type):

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7824,7 +7824,7 @@
                       -16.059027777777771],
                'min_Jp': 17.1875,
                'max_Jp': 82.1875}
-color_map_luts["cm_candidate_1"] = (array(
+color_map_luts["octarine"] = (array(
   [ 0.01845663, 0.01940818, 0.02066025, 0.02218966, 0.02395409, 0.02595033,
     0.02817596, 0.03060653, 0.03322304, 0.03602798, 0.03900455, 0.04208415,
     0.04516324, 0.04823603, 0.05128648, 0.05431253, 0.05730541, 0.06025524,
@@ -7955,145 +7955,145 @@
     0.9177585 , 0.92595735, 0.93431661, 0.94285311, 0.95166927, 0.96090167,
     0.97095595, 0.97849108, 0.98057884, 0.98147471]), np.ones(256))
 
-# Used to reconstruct the colormap in viscm
-parameters = {'xp': [17.623025510286254, 20.414094090828513,
-                    -82.390265292478205, -3.3099888437807294, -5.170701230808902],
-              'yp': [12.406964380648589, -98.305422647527877, 52.412280701754383,
-                     34.735513024986687, 22.175704412546509],
-              'min_Jp': 13.5507921715,
-              'max_Jp': 93.8863000932}
+parameters = {'xp': [25.813729633909759, 31.169191027506741,
+                    -75.940036844432967, -15.794085808651431,
+                    -6.7309972964103792],
+              'yp': [14.230225988700568, -99.470338983050823,
+                      9.2867231638418275, 41.007532956685509,
+                      31.532485875706215],
+              'min_Jp': 27.2243940579,
+              'max_Jp': 94.7771696638}
 
-
-color_map_luts["cm_candidate_2"] = (array(
-  [ 0.22330277, 0.22677033, 0.23017935, 0.23353169, 0.23681402, 0.2400368,
-    0.24320742, 0.24631505, 0.24936304, 0.25236366, 0.25530723, 0.25819299,
-    0.2610367 , 0.26382794, 0.26656596, 0.26926798, 0.2719204 , 0.27452761,
-    0.27710562, 0.27963477, 0.28213047, 0.28460423, 0.28702736, 0.28943233,
-    0.29181274, 0.29415763, 0.29649262, 0.29879558, 0.30108328, 0.30335993,
-    0.30560745, 0.30785443, 0.31007522, 0.31229044, 0.31449347, 0.31668024,
-    0.3188655 , 0.32102524, 0.32318876, 0.32532296, 0.32745808, 0.32956694,
-    0.33167014, 0.33374679, 0.33581262, 0.33784825, 0.33986937, 0.34185319,
-    0.34382043, 0.34573964, 0.34763692, 0.34948172, 0.35129044, 0.35304952,
-    0.35475122, 0.35640626, 0.3579826 , 0.35949714, 0.36094336, 0.36229195,
-    0.3635597 , 0.36473836, 0.36579828, 0.366748  , 0.36758102, 0.36828691,
-    0.36883982, 0.3692409 , 0.36948262, 0.36955344, 0.36944189, 0.36913688,
-    0.36862806, 0.36790621, 0.36696373, 0.36579487, 0.36439699, 0.36276941,
-    0.3609152 , 0.35884058, 0.35655508, 0.35407138, 0.35140496, 0.34857371,
-    0.34559732, 0.34249671, 0.33929342, 0.33600904, 0.3326647 , 0.32928363,
-    0.32588267, 0.3224787 , 0.31908743, 0.31572297, 0.31239786, 0.30912306,
-    0.30590808, 0.30276102, 0.2996887 , 0.2966967 , 0.29378953, 0.29097065,
-    0.2882426 , 0.28560705, 0.28306489, 0.28061848, 0.27826751, 0.27600835,
-    0.27383916, 0.27175753, 0.26976059, 0.26784495, 0.2660068 , 0.26424192,
-    0.26254879, 0.26092124, 0.2593516 , 0.25783427, 0.25636334, 0.25493271,
-    0.25353648, 0.25217148, 0.25082665, 0.2494952 , 0.24817023, 0.24684479,
-    0.24551313, 0.24416844, 0.2428016 , 0.24140563, 0.23997368, 0.23849902,
-    0.23697621, 0.2353966 , 0.23375381, 0.23204161, 0.23025401, 0.22838496,
-    0.22642823, 0.22437847, 0.22223061, 0.21997986, 0.21762015, 0.21514715,
-    0.21255756, 0.20984779, 0.20701463, 0.20405362, 0.20095999, 0.19773434,
-    0.19437503, 0.1908809 , 0.18725135, 0.1834808 , 0.17957218, 0.17552885,
-    0.17135306, 0.16704809, 0.1626184 , 0.15806468, 0.15339359, 0.14861956,
-    0.14375457, 0.13881352, 0.13381484, 0.12878121, 0.12374048, 0.11871631,
-    0.11375875, 0.10891932, 0.10425825, 0.09984694, 0.09576875, 0.09211897,
-    0.08900344, 0.08653544, 0.08483018, 0.08399726, 0.08413159, 0.08530458,
-    0.08755744, 0.09089837, 0.09530427, 0.10072634, 0.1070978 , 0.11434195,
-    0.12237919, 0.13113212, 0.14052892, 0.15050498, 0.16100355, 0.17197546,
-    0.18338793, 0.19520014, 0.20738006, 0.21990121, 0.23274066, 0.2459009,
-    0.25934606, 0.27305848, 0.28703874, 0.30127492, 0.3157365 , 0.33043715,
-    0.34534588, 0.3604517 , 0.37574975, 0.39120759, 0.40682457, 0.42256559,
-    0.4384161 , 0.45435289, 0.47033772, 0.48636436, 0.50238444, 0.51837124,
-    0.53430503, 0.55014493, 0.56586449, 0.58144061, 0.59684533, 0.61205655,
-    0.62705581, 0.64182617, 0.6563544 , 0.67063044, 0.68464533, 0.69839355,
-    0.71187229, 0.72507912, 0.73801226, 0.75067321, 0.76306221, 0.77518048,
-    0.7870294 , 0.79860909, 0.8099203 , 0.82096426, 0.83173724, 0.84223591,
-    0.85245837, 0.86239779, 0.87204252, 0.88137985, 0.89039307, 0.89906019,
-    0.90735227, 0.91523111, 0.92264608, 0.92953025, 0.93579566, 0.94132897,
-    0.9459897 , 0.94961072, 0.95202797, 0.95313791]), array(
-  [ 0.02115217, 0.02435766, 0.02770894, 0.03120549, 0.03486176, 0.03866843,
-    0.04255067, 0.04636284, 0.05011635, 0.05380853, 0.05745319, 0.06105552,
-    0.06460953, 0.06812624, 0.07160913, 0.07505172, 0.07846425, 0.08184654,
-    0.08519339, 0.08851553, 0.09180774, 0.09506735, 0.09830606, 0.10151324,
-    0.10469233, 0.1078471 , 0.11097106, 0.11407044, 0.11714148, 0.12018399,
-    0.12320135, 0.12618924, 0.12915144, 0.13208572, 0.13499318, 0.13787426,
-    0.14072906, 0.14355817, 0.14636281, 0.14914232, 0.1518996 , 0.15463338,
-    0.1573473 , 0.16004005, 0.16271589, 0.16537333, 0.16801777, 0.17064683,
-    0.17326807, 0.1758771 , 0.17848374, 0.1810841 , 0.18368643, 0.18629175,
-    0.18890301, 0.19152787, 0.19416467, 0.19682321, 0.19950818, 0.20222144,
-    0.20497307, 0.20776897, 0.21061428, 0.21351848, 0.21648954, 0.21953572,
-    0.22266685, 0.22589265, 0.22922275, 0.23266724, 0.23623633, 0.23994008,
-    0.24378808, 0.24778908, 0.25195046, 0.25627786, 0.26077448, 0.26544111,
-    0.27027536, 0.27527178, 0.2804218 , 0.28571395, 0.29113426, 0.29666673,
-    0.30229403, 0.30799808, 0.31376071, 0.31956423, 0.3253919 , 0.33122689,
-    0.33705636, 0.34286842, 0.34865273, 0.35440061, 0.36010493, 0.36576003,
-    0.37136152, 0.3769062 , 0.38239183, 0.38781708, 0.39318132, 0.39848456,
-    0.40372731, 0.40891054, 0.41403551, 0.41910295, 0.42411461, 0.42907338,
-    0.43398142, 0.43884093, 0.44365421, 0.4484236 , 0.45315147, 0.45784019,
-    0.46249111, 0.46710703, 0.4716911 , 0.47624563, 0.48077291, 0.48527518,
-    0.4897545 , 0.49421204, 0.49865123, 0.50307409, 0.50748262, 0.51187871,
-    0.51626387, 0.52063984, 0.52500891, 0.5293727 , 0.53373275, 0.5380905,
-    0.54244704, 0.5468042 , 0.55116316, 0.55552505, 0.55989088, 0.56426165,
-    0.56863832, 0.57302164, 0.57741226, 0.58181071, 0.58621776, 0.59063378,
-    0.59505889, 0.59949327, 0.60393701, 0.60839035, 0.61285357, 0.61732602,
-    0.62180742, 0.62629743, 0.63079562, 0.63530232, 0.63981655, 0.64433739,
-    0.64886414, 0.65339601, 0.65793219, 0.66247241, 0.66701578, 0.6715607,
-    0.67610609, 0.68065087, 0.68519387, 0.6897339 , 0.69426971, 0.69880098,
-    0.70332548, 0.7078417 , 0.71234818, 0.71684345, 0.72132594, 0.72579406,
-    0.73024615, 0.73468052, 0.73909539, 0.74348893, 0.74785925, 0.75220438,
-    0.75652229, 0.76081087, 0.76506792, 0.76929116, 0.77347824, 0.77762671,
-    0.78173401, 0.78579751, 0.78981448, 0.79378208, 0.79769739, 0.80155739,
-    0.80535919, 0.80909916, 0.81277394, 0.81638014, 0.81991431, 0.82337227,
-    0.82675067, 0.83004601, 0.8332539 , 0.83637039, 0.83939294, 0.84231614,
-    0.84513786, 0.84785477, 0.85046308, 0.85296202, 0.85534788, 0.85762123,
-    0.85978086, 0.86182713, 0.86376341, 0.86558903, 0.86731067, 0.86893272,
-    0.87045909, 0.87189851, 0.87325813, 0.87454523, 0.87576896, 0.87693806,
-    0.87806108, 0.87914681, 0.88020359, 0.88123929, 0.88226176, 0.88327808,
-    0.88429462, 0.88531766, 0.88635321, 0.8874062 , 0.88848178, 0.88958464,
-    0.89071918, 0.89189007, 0.89310154, 0.89435739, 0.89566299, 0.89702343,
-    0.89844289, 0.89992722, 0.90148396, 0.90312081, 0.90484675, 0.90667256,
-    0.90861145, 0.91068001, 0.91289948, 0.91529744, 0.91790984, 0.92078319,
-    0.92397484, 0.92755584, 0.93160009, 0.93616295]), array(
-  [ 0.00202189, 0.00551406, 0.00964551, 0.01445093, 0.02004956, 0.0264401,
-    0.03362203, 0.04168051, 0.04990578, 0.05800892, 0.06608536, 0.07416013,
-    0.0821575 , 0.0901558 , 0.09817059, 0.10611951, 0.11408814, 0.12206225,
-    0.12997097, 0.13792374, 0.14585317, 0.15371495, 0.16165622, 0.16953288,
-    0.17738626, 0.18527689, 0.19309761, 0.20095943, 0.20879604, 0.21659823,
-    0.22445639, 0.2322505 , 0.24010403, 0.2479312 , 0.25576831, 0.26363933,
-    0.27148133, 0.27940784, 0.28728597, 0.29527021, 0.30321929, 0.31125895,
-    0.3192891 , 0.32740412, 0.33552351, 0.34373191, 0.35194602, 0.36026328,
-    0.36857416, 0.37701245, 0.38544163, 0.39398549, 0.40255076, 0.41117865,
-    0.41988038, 0.42858781, 0.43740831, 0.44624508, 0.45509757, 0.46404412,
-    0.47298933, 0.48193114, 0.49093036, 0.499919  , 0.50887858, 0.51779951,
-    0.52670573, 0.53554666, 0.54429458, 0.55292889, 0.56142589, 0.56975888,
-    0.57789829, 0.58581211, 0.59346653, 0.60082694, 0.60785824, 0.6145276,
-    0.62080425, 0.6266615 , 0.63207769, 0.63703716, 0.64153073, 0.64555596,
-    0.64911698, 0.65222401, 0.65489262, 0.65714283, 0.65899809, 0.66048517,
-    0.66163121, 0.66246411, 0.66301175, 0.6633014 , 0.66335931, 0.66321038,
-    0.66287804, 0.66238411, 0.66174876, 0.66099052, 0.66012634, 0.65917165,
-    0.65814046, 0.6570454 , 0.6558979 , 0.65471   , 0.6534912 , 0.65224774,
-    0.65098687, 0.64971498, 0.64843772, 0.64716007, 0.64588635, 0.64462029,
-    0.64336792, 0.64213086, 0.64090915, 0.63970452, 0.6385183 , 0.6373514,
-    0.63620481, 0.63508181, 0.63397823, 0.63289364, 0.63182733, 0.63077828,
-    0.62974631, 0.62873002, 0.62772575, 0.62673148, 0.62574501, 0.62476397,
-    0.62378672, 0.62280874, 0.62182708, 0.62083861, 0.61984011, 0.61882799,
-    0.61779832, 0.61674751, 0.61567191, 0.6145678 , 0.61343038, 0.61225577,
-    0.61104058, 0.60978098, 0.60847311, 0.60711217, 0.60569296, 0.60421341,
-    0.6026698 , 0.60105844, 0.59937571, 0.59761482, 0.5957737 , 0.59385008,
-    0.59184061, 0.58974201, 0.5875511 , 0.58526193, 0.58287108, 0.58037826,
-    0.57778061, 0.57507533, 0.57225971, 0.56933109, 0.5662869 , 0.56311922,
-    0.55983012, 0.55641792, 0.55288035, 0.5492152 , 0.54542033, 0.54149367,
-    0.53743319, 0.53323696, 0.5289031 , 0.52442983, 0.51981546, 0.51505838,
-    0.51015712, 0.50511032, 0.4999168 , 0.49457554, 0.4890857 , 0.48344672,
-    0.47765827, 0.47172037, 0.46563338, 0.45939808, 0.45301574, 0.44648817,
-    0.43980205, 0.43297192, 0.42600399, 0.4189032 , 0.41167556, 0.40430087,
-    0.39681183, 0.38922019, 0.38152054, 0.37372536, 0.36586902, 0.35794158,
-    0.34998244, 0.34201236, 0.33404905, 0.32613936, 0.31830265, 0.31059314,
-    0.30304889, 0.29571869, 0.28866636, 0.28193158, 0.27559011, 0.26970074,
-    0.26431775, 0.25950931, 0.25533028, 0.25182957, 0.24905318, 0.24703636,
-    0.24580423, 0.24537215, 0.2457446 , 0.24691602, 0.2488721 , 0.25159047,
-    0.25504271, 0.25919611, 0.26401499, 0.26946277, 0.27550279, 0.28209979,
-    0.28922076, 0.29683505, 0.30491583, 0.31344099, 0.32238968, 0.33174591,
-    0.34150092, 0.35164825, 0.36218311, 0.37310746, 0.38442836, 0.39615832,
-    0.40831552, 0.42092364, 0.4340111 , 0.44760871, 0.46174485, 0.47643618,
-    0.49169001, 0.50743951, 0.52351526, 0.53960817]), np.ones(256))
+color_map_luts["arbre"] = (array(
+  [ 0.44131774, 0.44370177, 0.44605933, 0.44839054, 0.45067478, 0.45293504,
+    0.45516891, 0.45735977, 0.45952958, 0.46167133, 0.46377662, 0.4658642,
+    0.4679203 , 0.46994924, 0.4719642 , 0.47394237, 0.47590549, 0.47785176,
+    0.47976766, 0.48167637, 0.48355944, 0.48542865, 0.48728952, 0.48912419,
+    0.49095845, 0.49277134, 0.49457845, 0.49637777, 0.49816203, 0.49995019,
+    0.50171533, 0.50348733, 0.50524307, 0.50700027, 0.50874826, 0.51049227,
+    0.51223193, 0.5139633 , 0.51569293, 0.51741103, 0.51912775, 0.52083067,
+    0.52253029, 0.52421475, 0.52589174, 0.52755306, 0.52920041, 0.53083242,
+    0.53244156, 0.5340366 , 0.53559728, 0.53714122, 0.53864625, 0.54012236,
+    0.54156358, 0.5429568 , 0.54431299, 0.54561357, 0.5468594 , 0.54805251,
+    0.54917186, 0.55022123, 0.55119891, 0.55208818, 0.55288205, 0.55358155,
+    0.55417814, 0.55465144, 0.55499875, 0.55521509, 0.55529007, 0.55521283,
+    0.55496547, 0.55453915, 0.55392731, 0.55311827, 0.55210037, 0.55086221,
+    0.54939281, 0.54768186, 0.54571994, 0.5434988 , 0.54101159, 0.53825315,
+    0.53522015, 0.53191138, 0.52832782, 0.52447273, 0.52035171, 0.51597263,
+    0.51134556, 0.50648253, 0.5013963 , 0.49610438, 0.49062382, 0.48497218,
+    0.47916766, 0.47322878, 0.46717417, 0.46102228, 0.45479123, 0.44850028,
+    0.44216678, 0.43580508, 0.42943042, 0.42305718, 0.41669888, 0.41036814,
+    0.4040767 , 0.39783545, 0.39165443, 0.38554287, 0.37950919, 0.37356107,
+    0.36770548, 0.36194865, 0.35629619, 0.35075301, 0.34532345, 0.34001119,
+    0.33481937, 0.32975052, 0.32480662, 0.31998909, 0.3152988 , 0.31073609,
+    0.30630076, 0.30199208, 0.29780879, 0.29374913, 0.28981083, 0.28599112,
+    0.2822876 , 0.2786961 , 0.27521186, 0.27183029, 0.2685464 , 0.26535482,
+    0.26224983, 0.2592254 , 0.25627525, 0.25339285, 0.25057146, 0.24780425,
+    0.24508425, 0.24240456, 0.23975784, 0.23713717, 0.23453574, 0.23194694,
+    0.2293644 , 0.22678204, 0.22419418, 0.22159558, 0.21898153, 0.21634792,
+    0.21369107, 0.21100766, 0.2082966 , 0.20555708, 0.20278935, 0.19999489,
+    0.19717647, 0.19433834, 0.19148635, 0.18862811, 0.18577317, 0.18293319,
+    0.1801221 , 0.17735631, 0.17465482, 0.17203943, 0.16953482, 0.16716741,
+    0.16496905, 0.16297318, 0.16121584, 0.15973528, 0.15857143, 0.15776521,
+    0.15735758, 0.15738856, 0.15789607, 0.15891475, 0.16047487, 0.16260134,
+    0.16531299, 0.16862212, 0.17253417, 0.17704869, 0.18215934, 0.1878543,
+    0.19411741, 0.20092909, 0.20826721, 0.21610797, 0.22442667, 0.23319827,
+    0.24239792, 0.25200153, 0.26199056, 0.27233871, 0.28302468, 0.29402831,
+    0.30533058, 0.31691362, 0.3287703 , 0.34087697, 0.353218  , 0.36577922,
+    0.37855287, 0.39153018, 0.40469046, 0.41802196, 0.43152574, 0.4451854,
+    0.45898439, 0.47292271, 0.48699148, 0.50116973, 0.51546387, 0.52985625,
+    0.54433284, 0.55890334, 0.57353506, 0.58824263, 0.60299664, 0.61780476,
+    0.63264342, 0.64751846, 0.66240265, 0.67730956, 0.69220565, 0.70709988,
+    0.72197277, 0.73680781, 0.75161342, 0.76636275, 0.78104266, 0.7956434,
+    0.81016108, 0.82456869, 0.83884997, 0.85298622, 0.8669555 , 0.88073158,
+    0.89428251, 0.90756872, 0.92054055, 0.933148  , 0.94530521, 0.95690622,
+    0.96783447, 0.97790967, 0.986926  , 0.99464852]), array(
+  [ 0.05626182, 0.06063603, 0.06486246, 0.06895821, 0.07296132, 0.0768539,
+    0.08064911, 0.08437114, 0.08800482, 0.09156317, 0.09505774, 0.09847813,
+    0.10183875, 0.10513951, 0.1083756 , 0.11156296, 0.11469159, 0.11776515,
+    0.12079126, 0.12376178, 0.12668524, 0.1295584 , 0.13238112, 0.13515848,
+    0.13788542, 0.14056641, 0.14319933, 0.14578505, 0.14832396, 0.15081614,
+    0.15326083, 0.15565984, 0.15801096, 0.16031668, 0.16257524, 0.16478787,
+    0.16695485, 0.1690751 , 0.17115162, 0.17318085, 0.17516841, 0.1771089,
+    0.17900954, 0.18086473, 0.18268134, 0.18445603, 0.18619262, 0.18789332,
+    0.18955541, 0.19119063, 0.19278568, 0.19436176, 0.19590425, 0.19742907,
+    0.19893786, 0.20042606, 0.20191306, 0.20339073, 0.2048706 , 0.20636577,
+    0.20786968, 0.20939933, 0.21096715, 0.21257494, 0.21423423, 0.21596274,
+    0.21777215, 0.21966954, 0.22167374, 0.22380222, 0.22607063, 0.22849545,
+    0.23109288, 0.23388218, 0.23688208, 0.24011005, 0.24358302, 0.24731693,
+    0.2513262 , 0.25562329, 0.26021807, 0.26511744, 0.2703248 , 0.27583977,
+    0.28165796, 0.28777089, 0.29416603, 0.30082712, 0.30773447, 0.3148655,
+    0.3221953 , 0.32969733, 0.33734475, 0.34510833, 0.35296022, 0.36087366,
+    0.3688232 , 0.37678504, 0.38473729, 0.39266014, 0.40053592, 0.40834821,
+    0.41608347, 0.42373148, 0.43128293, 0.43873028, 0.44606759, 0.45329038,
+    0.46039546, 0.46738076, 0.47424525, 0.48098872, 0.48761174, 0.49411548,
+    0.50050166, 0.50677243, 0.51293031, 0.51897811, 0.52491887, 0.53075583,
+    0.53649234, 0.54213187, 0.54767795, 0.55313418, 0.55850412, 0.56379138,
+    0.56899954, 0.57413213, 0.57919266, 0.58418458, 0.58911129, 0.59397611,
+    0.59878205, 0.60353246, 0.60823062, 0.61287956, 0.61748225, 0.62204154,
+    0.62656023, 0.63104102, 0.63548651, 0.63989924, 0.64428164, 0.64863606,
+    0.65296474, 0.65726983, 0.66155346, 0.66581762, 0.67006415, 0.67429484,
+    0.67851134, 0.68271522, 0.68690793, 0.69109084, 0.69526517, 0.69943206,
+    0.70359259, 0.70774778, 0.7118983 , 0.71604481, 0.72018788, 0.72432796,
+    0.72846539, 0.73260037, 0.73673302, 0.74086331, 0.74499111, 0.7491162,
+    0.75323821, 0.7573567 , 0.76147109, 0.76558071, 0.7696848 , 0.77378264,
+    0.7778731 , 0.7819551 , 0.78602749, 0.79008902, 0.79413838, 0.79817417,
+    0.80219489, 0.80619902, 0.81018492, 0.81415092, 0.81809528, 0.82201619,
+    0.8259118 , 0.82978021, 0.83361959, 0.83742793, 0.84120311, 0.84494307,
+    0.84864571, 0.85230891, 0.85593052, 0.85950837, 0.86304028, 0.86652408,
+    0.86995756, 0.87333854, 0.87666491, 0.87993431, 0.88314454, 0.88629345,
+    0.88937891, 0.89239883, 0.89535078, 0.89823289, 0.9010432 , 0.9037798,
+    0.90644047, 0.90902303, 0.91152628, 0.91394863, 0.91628739, 0.91854149,
+    0.92071018, 0.92279108, 0.92478281, 0.92668571, 0.92849682, 0.93021638,
+    0.93184454, 0.9333779 , 0.93481986, 0.93616638, 0.93742098, 0.9385809,
+    0.93964931, 0.94062391, 0.94150928, 0.94230167, 0.94300773, 0.9436249,
+    0.9441573 , 0.94460885, 0.94497728, 0.9452696 , 0.94548972, 0.94564084,
+    0.94572458, 0.94574964, 0.94572195, 0.94564856, 0.94553784, 0.94539999,
+    0.94524753, 0.94509613, 0.94496566, 0.94487644, 0.94486297, 0.94496862,
+    0.9452415 , 0.94575723, 0.94660423, 0.94788672]), array(
+  [ 0.04951266, 0.06083219, 0.07165053, 0.08210915, 0.09247329, 0.10260581,
+    0.11257765, 0.12255122, 0.13238498, 0.14215586, 0.15194141, 0.16162489,
+    0.17131665, 0.18099838, 0.19059467, 0.20026607, 0.20988044, 0.21946015,
+    0.22909981, 0.23866289, 0.24827349, 0.25786718, 0.2674192 , 0.27704703,
+    0.28660208, 0.2962198 , 0.30581382, 0.31540414, 0.3250423 , 0.33462062,
+    0.34430922, 0.35393291, 0.36363845, 0.37332276, 0.38305586, 0.39280948,
+    0.40258886, 0.41242099, 0.42226577, 0.43218573, 0.44211511, 0.45213185,
+    0.46216464, 0.47228627, 0.48244044, 0.49267364, 0.50296577, 0.51331525,
+    0.52375993, 0.53422779, 0.54483694, 0.55545388, 0.56620423, 0.57700138,
+    0.5878611 , 0.59884035, 0.60984329, 0.62095468, 0.63213636, 0.6433441,
+    0.6546616 , 0.6660279 , 0.67741372, 0.68886382, 0.70036612, 0.71186928,
+    0.72336666, 0.73489587, 0.74641297, 0.75788429, 0.76929516, 0.7806286,
+    0.79188503, 0.80303085, 0.81402588, 0.82484364, 0.83545535, 0.84583016,
+    0.85593546, 0.86573722, 0.87520057, 0.88429036, 0.89297186, 0.90121149,
+    0.90897763, 0.91624135, 0.92297716, 0.92916362, 0.93478393, 0.93982627,
+    0.944284  , 0.9481558 , 0.95144555, 0.95416177, 0.95631799, 0.95793177,
+    0.95902407, 0.95961865, 0.95974148, 0.95942014, 0.95868324, 0.95756071,
+    0.95608217, 0.9542758 , 0.95216987, 0.94979177, 0.94716782, 0.94432316,
+    0.94128162, 0.93806566, 0.93469637, 0.93119344, 0.92757518, 0.92385855,
+    0.92005924, 0.91619167, 0.91226907, 0.90830358, 0.90430622, 0.90028706,
+    0.8962552 , 0.89221888, 0.88818549, 0.88416167, 0.88015334, 0.87616574,
+    0.87220349, 0.86827062, 0.86437059, 0.86050638, 0.85668045, 0.85289483,
+    0.84915193, 0.84545249, 0.84179674, 0.83818509, 0.83461764, 0.83109413,
+    0.82761399, 0.82417636, 0.82078008, 0.81742372, 0.81410558, 0.81082372,
+    0.80757594, 0.80435989, 0.80117258, 0.79801115, 0.79487252, 0.79175341,
+    0.78865036, 0.78555973, 0.78247771, 0.77940035, 0.77632353, 0.77324301,
+    0.77015423, 0.76705215, 0.76393282, 0.76079161, 0.75762382, 0.75442468,
+    0.7511894 , 0.74791315, 0.74459109, 0.74121838, 0.73779019, 0.73430173,
+    0.73074825, 0.72712503, 0.72342746, 0.71965096, 0.71579107, 0.71184237,
+    0.70780168, 0.70366489, 0.69942802, 0.69508722, 0.69063878, 0.68607916,
+    0.68140497, 0.67661297, 0.67170012, 0.66666354, 0.66150054, 0.65620859,
+    0.65078539, 0.64522878, 0.63953554, 0.63370386, 0.62773308, 0.62162183,
+    0.61536895, 0.60897349, 0.60243471, 0.59575208, 0.5889253 , 0.58195428,
+    0.57483914, 0.56757997, 0.56016891, 0.55261464, 0.54491821, 0.53708088,
+    0.52910417, 0.52098984, 0.51272693, 0.50432884, 0.49579898, 0.48714013,
+    0.47834844, 0.46942272, 0.46037716, 0.45121575, 0.4419283 , 0.43252618,
+    0.42302131, 0.41340722, 0.40368844, 0.3938839 , 0.38398203, 0.37399931,
+    0.36394886, 0.35381793, 0.34364247, 0.33340639, 0.32314442, 0.31285132,
+    0.30255889, 0.2922684 , 0.28202116, 0.27181653, 0.26171116, 0.25171982,
+    0.24189603, 0.23229831, 0.22296839, 0.21399898, 0.20548226, 0.19752366,
+    0.19024226, 0.18379968, 0.17836522, 0.17412601, 0.17128095, 0.17003118,
+    0.17056788, 0.17305931, 0.17763999, 0.18441057, 0.19343386, 0.2047358,
+    0.21833687, 0.2341978 , 0.25223614, 0.2722682]), np.ones(256))
 
 # Used to reconstruct the colormap in viscm
 parameters = {'xp': [-2.3569023569023386, 29.24031986531989, 21.948653198653204, -25.44718013468011, -4.78745791245791],
@@ -8101,7 +8101,7 @@
               'min_Jp': 15,
               'max_Jp': 95}
 
-color_map_luts['cm_candidate_3'] = (array(
+color_map_luts["kelp"] = (array(
   [ 0.07873808, 0.08503098, 0.09119215, 0.09725944, 0.10324966, 0.10914691,
     0.1149903 , 0.12076614, 0.12647234, 0.13214487, 0.13775951, 0.14331952,
     0.14885405, 0.15434127, 0.15978387, 0.16520148, 0.17058327, 0.17592717,
@@ -8233,147 +8233,148 @@
     0.53036982, 0.52734442, 0.52433316, 0.52118636]), np.ones(256))
 
 # Used to reconstruct the colormap in viscm
-parameters = {'xp': [6.4995757388238928, -16.241760894839473,
-                -12.632024921242106, -21.656364855235495, 7.5824965309031143,
-                6.4995757388238928, 86.274740755325524, 15.884889270177041,
-                -11.188130531803154, 3.9727605573057474],
-              'yp': [-0.7838283828382373, -30.022689768976846,
-                -9.447194719471895, 6.7966171617162274, -0.7838283828382373,
-                20.152640264026445, 37.840346534653492, 13.294141914191471,
-                40.728135313531396, -0.7838283828382373],
-              'min_Jp': 3.96624472574,
-              'max_Jp': 96.2869198312}
+parameters = {'xp': [ 3.444773825208614, -17.207400087834856,
+                    -12.632024921242106, -21.656364855235495,
+                     16.850570926657895,  55.256368028107175,
+                     14.676657883179644,  12.502744839701393,
+                     40.401295564339051,   0.90854194115064502],
+              'yp': [-1.6304347826086598, -24.818840579710098,
+                     -9.447194719471895,    6.796617161716227,
+                     -5.6159420289854722,  57.065217391304373,
+                     13.224637681159436,   3.4420289855072781,
+                     58.514492753623216, 0.1811594202898732],
+              'min_Jp': 3.96624472574, 'max_Jp': 96.5975103734}
 
-color_map_luts['cm_candidate_4'] = (array(
-  [ 0.03522636, 0.03833067, 0.04137086, 0.04422592, 0.0469077 , 0.04949927,
-    0.05195494, 0.05435346, 0.05668617, 0.05895159, 0.06118936, 0.06333705,
-    0.0654643 , 0.06747923, 0.06945627, 0.07130747, 0.07309864, 0.07473997,
-    0.07630552, 0.0776979 , 0.07898537, 0.08010829, 0.08107311, 0.08190198,
-    0.08249749, 0.08292767, 0.083176  , 0.08316237, 0.08291788, 0.08242916,
-    0.08166884, 0.08060568, 0.07920431, 0.0774235 , 0.07522534, 0.07255839,
-    0.06937739, 0.0656378 , 0.06127894, 0.05615142, 0.05030332, 0.04372508,
-    0.03661499, 0.03005334, 0.02491817, 0.02190702, 0.02148032, 0.02375703,
-    0.02860561, 0.03577661, 0.04479347, 0.05435906, 0.06405802, 0.073712,
-    0.083229  , 0.09254701, 0.10166185, 0.11057105, 0.11925526, 0.12774985,
-    0.13603271, 0.14414105, 0.15206539, 0.15982306, 0.16742795, 0.17487481,
-    0.1821786 , 0.18934939, 0.19639219, 0.20330755, 0.21010013, 0.21677862,
-    0.22334613, 0.22980539, 0.2361588 , 0.24240872, 0.24855784, 0.25461006,
-    0.26057194, 0.26645523, 0.27228068, 0.27808294, 0.28391451, 0.28984425,
-    0.29594531, 0.30227382, 0.30885036, 0.31565791, 0.32265606, 0.32979907,
-    0.33704768, 0.34437291, 0.35175534, 0.35917963, 0.36664001, 0.37413354,
-    0.38165767, 0.38921099, 0.39679271, 0.40440245, 0.41203793, 0.41969968,
-    0.42738983, 0.43510843, 0.44285558, 0.45063135, 0.45843459, 0.46626598,
-    0.47412702, 0.48201778, 0.48993829, 0.49788858, 0.50586861, 0.51387941,
-    0.52192105, 0.52999354, 0.53809685, 0.54623165, 0.55439949, 0.56259924,
-    0.57083088, 0.57909441, 0.58738981, 0.59571933, 0.60408406, 0.61248174,
-    0.62091239, 0.62937605, 0.63787278, 0.64640265, 0.65496986, 0.6635733,
-    0.67221077, 0.68088234, 0.68958809, 0.69832812, 0.70710251, 0.71591134,
-    0.72475464, 0.73363239, 0.74254444, 0.75149049, 0.76046998, 0.76948199,
-    0.77852511, 0.7875972 , 0.79669513, 0.80581439, 0.81497372, 0.82415011,
-    0.83332691, 0.84252673, 0.85170847, 0.86088203, 0.86999225, 0.87899789,
-    0.88778843, 0.89611225, 0.90337493, 0.90860929, 0.91175598, 0.91370041,
-    0.91501944, 0.91599762, 0.91675056, 0.91732023, 0.91778036, 0.91811232,
-    0.91837528, 0.91855668, 0.91865995, 0.91869408, 0.91866521, 0.91857744,
-    0.91843343, 0.91823488, 0.91798277, 0.91767766, 0.91731981, 0.91690936,
-    0.91644637, 0.91593091, 0.91536311, 0.91474318, 0.91407949, 0.91336534,
-    0.91259789, 0.9117775 , 0.91090473, 0.90998027, 0.90900929, 0.90798541,
-    0.90690915, 0.90578172, 0.90460452, 0.90337725, 0.90209964, 0.90077411,
-    0.8994031 , 0.89798939, 0.89653293, 0.89503377, 0.8935014 , 0.89194155,
-    0.89036098, 0.88876764, 0.88716861, 0.88557213, 0.88399934, 0.88246708,
-    0.88099466, 0.87960391, 0.87831891, 0.87716549, 0.87617292, 0.87537354,
-    0.87479129, 0.87444814, 0.87436113, 0.87454145, 0.87499843, 0.87576485,
-    0.87680691, 0.87811326, 0.87968158, 0.88153454, 0.88361008, 0.88590523,
-    0.88844166, 0.89115133, 0.89406565, 0.89714468, 0.90038245, 0.90377593,
-    0.907298  , 0.91095738, 0.91472653, 0.91860958, 0.92259009, 0.92665895,
-    0.93081455, 0.93503101, 0.93931993, 0.9436451 , 0.94800146, 0.9523663,
-    0.95670453, 0.96101117, 0.96524557, 0.96939757, 0.97351615, 0.97761533,
-    0.9817668 , 0.98605907, 0.99050657, 0.99513559]), array(
-  [ 0.00717629, 0.00941214, 0.01190817, 0.01466782, 0.01769325, 0.02096064,
-    0.02448619, 0.02824312, 0.03223296, 0.0364546 , 0.04088298, 0.04530612,
-    0.04965416, 0.05396345, 0.05821592, 0.06244244, 0.06662853, 0.07080188,
-    0.07494638, 0.07908871, 0.08321444, 0.08733926, 0.09146285, 0.09558174,
-    0.09971703, 0.10385498, 0.10799951, 0.11216606, 0.11634821, 0.12054804,
-    0.12476983, 0.12901801, 0.13329717, 0.13761219, 0.1419667 , 0.14636562,
-    0.15081199, 0.1553078 , 0.1598563 , 0.16447093, 0.16913797, 0.17385302,
-    0.17859758, 0.18333009, 0.18798922, 0.19250917, 0.19683753, 0.20095108,
-    0.20485138, 0.2085565 , 0.21208884, 0.21547059, 0.2187219 , 0.22186023,
-    0.22490038, 0.22785619, 0.23073769, 0.23355385, 0.23631507, 0.23902539,
-    0.2416948 , 0.24432528, 0.24692442, 0.24949553, 0.25204168, 0.25456886,
-    0.25707918, 0.25957507, 0.26205962, 0.26453663, 0.26700889, 0.26947802,
-    0.27194668, 0.2744176 , 0.27689351, 0.27937719, 0.28187144, 0.28437889,
-    0.28690162, 0.28944039, 0.2919933 , 0.29455367, 0.29710773, 0.29963342,
-    0.30210294, 0.30448964, 0.30677635, 0.3089592 , 0.31104521, 0.31304651,
-    0.31497559, 0.31684275, 0.31865561, 0.32042051, 0.32214032, 0.32381667,
-    0.32545088, 0.32704374, 0.32859564, 0.33010679, 0.33157808, 0.33300923,
-    0.33439917, 0.33574763, 0.33705434, 0.33831896, 0.33954175, 0.34072209,
-    0.34185885, 0.34295161, 0.34399994, 0.34500336, 0.34596145, 0.34687315,
-    0.34773789, 0.34855512, 0.34932428, 0.35004438, 0.35071386, 0.35133271,
-    0.35190023, 0.35241573, 0.35287848, 0.35328622, 0.35363738, 0.35393254,
-    0.35417082, 0.35435127, 0.3544729 , 0.35453467, 0.35453246, 0.3544659,
-    0.35433547, 0.35413995, 0.35387806, 0.35354849, 0.35314988, 0.35268084,
-    0.35213998, 0.35152592, 0.35083734, 0.35007306, 0.34923213, 0.34831394,
-    0.34731844, 0.34624641, 0.34509977, 0.34388221, 0.3425743 , 0.34119867,
-    0.33977344, 0.33827666, 0.33675521, 0.33520455, 0.33369577, 0.33229408,
-    0.3311592 , 0.33064529, 0.3315562 , 0.33510297, 0.34120532, 0.34863721,
-    0.35665329, 0.36490048, 0.37324447, 0.38164035, 0.39000738, 0.39837187,
-    0.4066732 , 0.41493037, 0.42314374, 0.43130788, 0.43942037, 0.44748078,
-    0.4554899 , 0.46344929, 0.4713609 , 0.47922684, 0.48704928, 0.49483028,
-    0.50257176, 0.51027545, 0.51794292, 0.52557548, 0.53316833, 0.54072793,
-    0.5482575 , 0.55575771, 0.56322906, 0.57067187, 0.57808347, 0.58546876,
-    0.59282791, 0.60016055, 0.60746619, 0.61474529, 0.6219982 , 0.62922358,
-    0.6364201 , 0.64358621, 0.65072187, 0.65782681, 0.66489576, 0.67192551,
-    0.67891244, 0.68585233, 0.69274151, 0.69957569, 0.70634483, 0.71304131,
-    0.71965675, 0.72618213, 0.73260803, 0.73892495, 0.7451228 , 0.75119134,
-    0.75712447, 0.76291767, 0.76856894, 0.77407887, 0.77944929, 0.78467669,
-    0.78977658, 0.79475738, 0.79962537, 0.80438334, 0.8090495 , 0.81362981,
-    0.81812939, 0.822561  , 0.82692955, 0.83124292, 0.83550709, 0.83972927,
-    0.84391222, 0.84806526, 0.8521879 , 0.85628939, 0.8603707 , 0.86443499,
-    0.86849289, 0.8725333 , 0.87658679, 0.88063259, 0.88469332, 0.88877587,
-    0.89287903, 0.8970406 , 0.90125214, 0.90552454, 0.90987168, 0.91426387,
-    0.91867981, 0.92310252, 0.92749634, 0.93184435]), array(
-  [ 0.01748575, 0.02188322, 0.02672053, 0.0320049 , 0.0377461 , 0.04374005,
-    0.04967379, 0.05549667, 0.06125422, 0.06697896, 0.07265592, 0.07836626,
-    0.08404944, 0.08980628, 0.09555785, 0.10139442, 0.1072383 , 0.1131781,
-    0.11912453, 0.12517083, 0.13123399, 0.13736417, 0.14354194, 0.14973423,
-    0.15601774, 0.16231425, 0.16862456, 0.17499629, 0.1813827 , 0.18777277,
-    0.19416216, 0.20054403, 0.20690794, 0.21323963, 0.21951507, 0.22570772,
-    0.23177787, 0.23767532, 0.24334283, 0.24872311, 0.25369558, 0.25814289,
-    0.26191917, 0.26487715, 0.26692571, 0.26808099, 0.2684773 , 0.26831057,
-    0.26778035, 0.26703712, 0.26618953, 0.26531302, 0.26445711, 0.26365339,
-    0.26292142, 0.26228611, 0.26174391, 0.26129748, 0.26096479, 0.26072575,
-    0.26060175, 0.26057155, 0.26064668, 0.26081902, 0.2610823 , 0.26144311,
-    0.26189315, 0.26242706, 0.26304235, 0.26373904, 0.26451357, 0.26535872,
-    0.26627022, 0.2672428 , 0.26826982, 0.26934258, 0.27044939, 0.27157424,
-    0.27269501, 0.27378145, 0.27479358, 0.27568221, 0.27639444, 0.27688646,
-    0.27714116, 0.27717994, 0.27705717, 0.27683892, 0.27658103, 0.27631892,
-    0.27606894, 0.27583456, 0.27561248, 0.27539915, 0.27518661, 0.27496758,
-    0.27473682, 0.27448999, 0.27422348, 0.27393436, 0.27362188, 0.27328355,
-    0.2729159 , 0.27251738, 0.27208666, 0.27162255, 0.27112496, 0.27059242,
-    0.2700227 , 0.26941486, 0.26876801, 0.26808128, 0.26735382, 0.26658388,
-    0.26577047, 0.26491267, 0.26400955, 0.26305951, 0.26206006, 0.26101115,
-    0.25991162, 0.25876029, 0.25755586, 0.25629478, 0.25497448, 0.25359554,
-    0.25215625, 0.25065475, 0.24908901, 0.24745683, 0.24575146, 0.24397141,
-    0.24211619, 0.24018264, 0.23816727, 0.23606623, 0.23387523, 0.23158951,
-    0.22920377, 0.22671207, 0.22410782, 0.22138361, 0.21853118, 0.2155413,
-    0.21240363, 0.20910667, 0.20563757, 0.20198213, 0.19808683, 0.19395191,
-    0.18956227, 0.18482577, 0.17973484, 0.17416272, 0.16804944, 0.16121776,
-    0.15345263, 0.14450283, 0.13440947, 0.124886  , 0.11850527, 0.11530169,
-    0.11429895, 0.11482246, 0.11642615, 0.11883084, 0.12186164, 0.12538522,
-    0.12930681, 0.1335561 , 0.13808108, 0.14284015, 0.1478005 , 0.15293632,
-    0.15822735, 0.16365776, 0.16921525, 0.1748903 , 0.1806757 , 0.18656606,
-    0.19255748, 0.1986473 , 0.2048339 , 0.2111165 , 0.21749036, 0.22396009,
-    0.23052897, 0.23719906, 0.2439729 , 0.25085347, 0.25784138, 0.26494541,
-    0.27217054, 0.27952188, 0.28700505, 0.29462764, 0.30239803, 0.31032349,
-    0.31841207, 0.3266725 , 0.33511695, 0.34375899, 0.35260525, 0.36166667,
-    0.37095463, 0.38048072, 0.39025901, 0.40030417, 0.41061811, 0.42120758,
-    0.43207661, 0.44322563, 0.45465067, 0.46634268, 0.47828317, 0.49044181,
-    0.50279317, 0.51530648, 0.52795044, 0.54069578, 0.55350777, 0.56628119,
-    0.57905927, 0.59183607, 0.60457689, 0.61715156, 0.62970523, 0.64220124,
-    0.65448412, 0.66677747, 0.67887524, 0.6909046 , 0.70283761, 0.71461871,
-    0.72635938, 0.73792219, 0.74945264, 0.76083717, 0.77214931, 0.7834029,
-    0.79449887, 0.80567531, 0.81656784, 0.82755657, 0.83843574, 0.84923937,
-    0.86013357, 0.87076849, 0.88140532, 0.89198331, 0.90217129, 0.91220561,
-    0.922003  , 0.93137403, 0.94060562, 0.94975586]), np.ones(256))
+color_map_luts["dusk"] = (array(
+  [ 0.02379297, 0.0261157 , 0.02850455, 0.03095137, 0.0334476 , 0.0360304,
+    0.03863824, 0.04128529, 0.04384689, 0.04631624, 0.04870907, 0.05097181,
+    0.05316059, 0.05519077, 0.05714277, 0.05892787, 0.06062771, 0.06214595,
+    0.06357023, 0.06480933, 0.06592827, 0.06687993, 0.06766224, 0.06830946,
+    0.06872415, 0.0689718 , 0.06904334, 0.06886045, 0.06844799, 0.06780027,
+    0.06689054, 0.06568777, 0.06415629, 0.06225567, 0.05994102, 0.05716433,
+    0.0538775 , 0.05003897, 0.04551728, 0.04029295, 0.03446229, 0.02874366,
+    0.02385447, 0.02127502, 0.02242508, 0.0275301 , 0.03578777, 0.0460108,
+    0.05640918, 0.06656043, 0.07634217, 0.08577769, 0.0948332 , 0.10358394,
+    0.11202107, 0.1201887 , 0.12811962, 0.13583301, 0.14334681, 0.15067405,
+    0.15783121, 0.16483645, 0.17170112, 0.17843538, 0.18504822, 0.19154762,
+    0.19793598, 0.20422296, 0.21041472, 0.21651599, 0.22253073, 0.22846216,
+    0.23431275, 0.24008353, 0.24576942, 0.25137838, 0.25691024, 0.26236467,
+    0.2677416 , 0.27303884, 0.27827346, 0.28346958, 0.28867439, 0.29395695,
+    0.29940706, 0.30508966, 0.31101811, 0.31715338, 0.32345196, 0.32986905,
+    0.33636963, 0.34292913, 0.34953992, 0.35619361, 0.36288108, 0.36960242,
+    0.37635671, 0.38314097, 0.38995328, 0.39679542, 0.40366619, 0.41056374,
+    0.41748952, 0.4244427 , 0.43142299, 0.43843086, 0.44546541, 0.45252642,
+    0.45961609, 0.46673201, 0.47387346, 0.48104441, 0.4882424 , 0.4954658,
+    0.5027165 , 0.50999774, 0.51730448, 0.52463621, 0.53199987, 0.53939166,
+    0.54680881, 0.55425097, 0.56172938, 0.56923401, 0.57676432, 0.58432014,
+    0.59191314, 0.59953375, 0.60718079, 0.6148543 , 0.62255842, 0.63029849,
+    0.63806608, 0.64586138, 0.65368467, 0.66153629, 0.66942125, 0.67734101,
+    0.68528996, 0.69326842, 0.70127674, 0.70931527, 0.71738431, 0.72548409,
+    0.73361473, 0.74177618, 0.74996816, 0.75819004, 0.76644077, 0.77471875,
+    0.78302933, 0.79137839, 0.79974926, 0.80813611, 0.81657205, 0.82501275,
+    0.83349386, 0.84198258, 0.85048492, 0.8590003 , 0.86751392, 0.87598709,
+    0.88439937, 0.89263161, 0.90038304, 0.90687168, 0.91166375, 0.91561895,
+    0.91938826, 0.92318052, 0.9269946 , 0.93070173, 0.93405155, 0.93665497,
+    0.93818572, 0.93886516, 0.93923641, 0.93951639, 0.93979528, 0.94008554,
+    0.9403692 , 0.94071214, 0.94104779, 0.94137354, 0.94172077, 0.9420958,
+    0.94244563, 0.94274487, 0.94309703, 0.94344151, 0.94369514, 0.94407978,
+    0.94432487, 0.94468376, 0.9449135 , 0.94526127, 0.94544804, 0.945808,
+    0.94601776, 0.94631082, 0.94656632, 0.9467505 , 0.94706511, 0.94725625,
+    0.9474877 , 0.94776129, 0.9479231 , 0.94816834, 0.94843142, 0.94859414,
+    0.9488144 , 0.94909331, 0.9492829 , 0.94943682, 0.94975423, 0.94999285,
+    0.95015253, 0.9504141 , 0.95072075, 0.95095877, 0.95112839, 0.95145943,
+    0.95179158, 0.95206534, 0.95228128, 0.95264039, 0.95303147, 0.95337439,
+    0.95367005, 0.95401622, 0.95449739, 0.95494063, 0.95534717, 0.95571839,
+    0.95624412, 0.95681749, 0.95736483, 0.9578879 , 0.95838862, 0.95906407,
+    0.95978233, 0.96048784, 0.96118301, 0.96187054, 0.96267628, 0.96359786,
+    0.96452242, 0.96545346, 0.9663949 , 0.96735109, 0.96853232, 0.96975252,
+    0.97100046, 0.9722811 , 0.97360166, 0.97499094]), array(
+  [ 0.01131879, 0.01391783, 0.01674789, 0.01980761, 0.02309557, 0.02659703,
+    0.03032697, 0.03427332, 0.03844574, 0.04276988, 0.04704341, 0.05129054,
+    0.05550078, 0.05969933, 0.06386921, 0.06803423, 0.07217737, 0.07632217,
+    0.08045053, 0.08458326, 0.0887074 , 0.09283339, 0.0969619 , 0.10108667,
+    0.10522618, 0.10936788, 0.1135134 , 0.1176759 , 0.12185004, 0.12603586,
+    0.13023651, 0.1344553 , 0.13869563, 0.14296085, 0.14725413, 0.15157814,
+    0.15593465, 0.16032384, 0.16475544, 0.16922528, 0.17373863, 0.17827184,
+    0.1827844 , 0.18715937, 0.19126004, 0.19503246, 0.19852956, 0.20182311,
+    0.20496848, 0.20800345, 0.21095494, 0.21383778, 0.21666923, 0.21945445,
+    0.22220507, 0.22492545, 0.22761943, 0.23029139, 0.232945  , 0.23558391,
+    0.23821051, 0.24082617, 0.24343288, 0.24603239, 0.24862627, 0.25121599,
+    0.25380393, 0.25639065, 0.25897721, 0.26156482, 0.2641547 , 0.26674815,
+    0.26934657, 0.27195171, 0.274567  , 0.27719243, 0.27983033, 0.28248333,
+    0.2851544 , 0.28784764, 0.29056213, 0.2932949 , 0.29603493, 0.29876097,
+    0.30144013, 0.30404208, 0.30655158, 0.3089731 , 0.31131738, 0.31359848,
+    0.31582875, 0.31801758, 0.32016861, 0.32228546, 0.3243719 , 0.32642829,
+    0.32845517, 0.33045378, 0.33242493, 0.33436795, 0.33628328, 0.33817163,
+    0.34003234, 0.34186568, 0.34367166, 0.34544999, 0.34720092, 0.34892444,
+    0.35061943, 0.35228681, 0.35392679, 0.35553735, 0.35711946, 0.35867374,
+    0.36019909, 0.36169371, 0.36315988, 0.36459771, 0.36600339, 0.36737862,
+    0.36872465, 0.37004148, 0.37132268, 0.37257345, 0.37379387, 0.37498382,
+    0.37613623, 0.37725628, 0.37834438, 0.37940027, 0.38042109, 0.38140311,
+    0.38235118, 0.38326484, 0.38414358, 0.38498682, 0.38579085, 0.38655416,
+    0.38728007, 0.38796794, 0.3886171 , 0.38922686, 0.38979653, 0.39032549,
+    0.39081315, 0.39125906, 0.39166297, 0.39202487, 0.39234513, 0.39262465,
+    0.39285875, 0.39304214, 0.39318804, 0.39330134, 0.3933541 , 0.39338391,
+    0.39336059, 0.3933133 , 0.39323839, 0.39313869, 0.39303156, 0.39295965,
+    0.39295557, 0.39315799, 0.39391119, 0.39605497, 0.39998337, 0.40475771,
+    0.40971377, 0.41464651, 0.41955776, 0.42457034, 0.42991332, 0.43592086,
+    0.44281731, 0.45031271, 0.45790211, 0.46542398, 0.47282633, 0.48011752,
+    0.48732483, 0.49440541, 0.50141998, 0.50837558, 0.51525348, 0.52205415,
+    0.52881949, 0.53556909, 0.54223396, 0.54885896, 0.55550043, 0.56201556,
+    0.56858101, 0.57503769, 0.58153866, 0.58793414, 0.59439323, 0.60071983,
+    0.60710417, 0.61341299, 0.61971646, 0.6260332 , 0.63225511, 0.63852001,
+    0.64474137, 0.65092034, 0.65713626, 0.66329112, 0.66941973, 0.67558021,
+    0.68169667, 0.68777066, 0.69387233, 0.69997643, 0.70599397, 0.71203573,
+    0.71810111, 0.72411175, 0.73009422, 0.73609755, 0.74212117, 0.74807095,
+    0.75401458, 0.75997634, 0.7659557 , 0.77187449, 0.77777791, 0.78369726,
+    0.78963205, 0.79554605, 0.80140962, 0.80728734, 0.81317876, 0.8190834,
+    0.82493514, 0.83077307, 0.83662327, 0.84248528, 0.84835861, 0.85417838,
+    0.85998992, 0.86581204, 0.87164424, 0.877486  , 0.88329814, 0.88908291,
+    0.8948766 , 0.9006787 , 0.90648868, 0.91230608, 0.91806899, 0.9238354,
+    0.92961071, 0.93539672, 0.94119732, 0.94701577]), array(
+  [ 0.02001135, 0.02410049, 0.0285127 , 0.03327199, 0.03840892, 0.04377278,
+    0.04914302, 0.05449872, 0.05987322, 0.06527853, 0.07069504, 0.07616998,
+    0.08164232, 0.08718911, 0.0927269 , 0.09833499, 0.10393204, 0.10960072,
+    0.11525772, 0.12097641, 0.12669592, 0.13244873, 0.13822862, 0.14400333,
+    0.14983392, 0.15566404, 0.16149121, 0.16735218, 0.17321659, 0.17907296,
+    0.18491908, 0.19075078, 0.19656126, 0.20233991, 0.20807104, 0.21373203,
+    0.21929102, 0.22470403, 0.22993048, 0.2348811 , 0.23944453, 0.24342373,
+    0.24654791, 0.24854699, 0.24944069, 0.24964107, 0.24954394, 0.24938897,
+    0.24925926, 0.24920029, 0.24923631, 0.24934712, 0.24956187, 0.24984924,
+    0.25022917, 0.25068854, 0.25121928, 0.25182018, 0.25248939, 0.25322692,
+    0.25402939, 0.25489122, 0.25581039, 0.25678491, 0.25781293, 0.25889267,
+    0.26002567, 0.26120803, 0.26243772, 0.26371326, 0.26503318, 0.26639597,
+    0.26779993, 0.26924355, 0.27072854, 0.27224553, 0.27378857, 0.27534839,
+    0.27691073, 0.27845714, 0.27994688, 0.28132934, 0.28254139, 0.28352939,
+    0.28426343, 0.28477016, 0.28511888, 0.2853897 , 0.28562523, 0.28584846,
+    0.28606765, 0.28628369, 0.28648659, 0.2866717 , 0.28683846, 0.28698041,
+    0.28709378, 0.28717794, 0.2872321 , 0.28725271, 0.28723935, 0.28719253,
+    0.28710992, 0.28699145, 0.2868367 , 0.28664459, 0.28641544, 0.28614894,
+    0.28584249, 0.28549799, 0.28511576, 0.28469141, 0.28422699, 0.28372376,
+    0.28317939, 0.28259016, 0.28196082, 0.28129151, 0.28057453, 0.27981338,
+    0.27901054, 0.27816595, 0.27726695, 0.2763238 , 0.27533659, 0.27430498,
+    0.27321529, 0.27207747, 0.27089221, 0.26965877, 0.26837161, 0.26702355,
+    0.26562346, 0.26417019, 0.2626624 , 0.26109861, 0.25947144, 0.25777762,
+    0.25602258, 0.25420436, 0.25232078, 0.25036953, 0.2483481 , 0.24625386,
+    0.24408403, 0.24183578, 0.23950621, 0.23709248, 0.23459188, 0.23200196,
+    0.22930941, 0.22649861, 0.22358618, 0.22057215, 0.21739359, 0.2141051,
+    0.21063096, 0.20699978, 0.20316855, 0.19908944, 0.19471496, 0.19000133,
+    0.1847768 , 0.17886461, 0.17199335, 0.16449989, 0.15797658, 0.15243265,
+    0.1469976 , 0.14126571, 0.13521428, 0.12907937, 0.12349501, 0.11987508,
+    0.12047985, 0.12617587, 0.1352476 , 0.14599545, 0.15747549, 0.16925814,
+    0.18117203, 0.19298222, 0.20476193, 0.21648737, 0.22808422, 0.23953607,
+    0.25094286, 0.26235208, 0.27357466, 0.28472765, 0.29596437, 0.30689052,
+    0.31799337, 0.32882896, 0.33982619, 0.35056047, 0.36151349, 0.37212065,
+    0.38293088, 0.39355561, 0.40419893, 0.41491671, 0.42538398, 0.43601357,
+    0.44654349, 0.4569744 , 0.46755003, 0.47796579, 0.48832714, 0.49881686,
+    0.50919369, 0.51945918, 0.52983799, 0.54024888, 0.55039796, 0.56064657,
+    0.57099376, 0.58117647, 0.59128027, 0.60146991, 0.6117441 , 0.62177734,
+    0.63179964, 0.64189483, 0.65206133, 0.66202135, 0.67193144, 0.68190219,
+    0.69193176, 0.70188796, 0.71165938, 0.72147981, 0.73134717, 0.74125921,
+    0.75096714, 0.76061378, 0.77029569, 0.78001026, 0.78975461, 0.79927776,
+    0.80875027, 0.81824276, 0.82775166, 0.83727291, 0.84665036, 0.85588868,
+    0.86512674, 0.87435845, 0.88357649, 0.89277169, 0.90169092, 0.91054951,
+    0.91934683, 0.92805132, 0.93660318, 0.9448656 ]), np.ones(256))
 
 # Aliases
 color_map_luts['B-W LINEAR'] = color_map_luts['idl00']

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -12,7 +12,6 @@
 #-----------------------------------------------------------------------------
 import numpy as np
 
-import matplotlib
 import matplotlib.colors as cc
 import matplotlib.cm as mcm
 from . import _colormap_data as _cm
@@ -72,9 +71,6 @@
 add_cmap('bds_highcontrast', cdict)
 add_cmap('algae', cdict)
 
-# Set the default colormap to be algae.
-matplotlib.rc('image', cmap="algae")
-
 # This next colormap was designed by Tune Kamae and converted here by Matt
 _vs = np.linspace(0,1,255)
 _kamae_red = np.minimum(255,

diff -r 203ee09e51eeffbab44fb6802c996d4ba106a264 -r 483eb61849ed48787ebef3d848f6c1378a169b79 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -19,6 +19,8 @@
 import matplotlib.pyplot as plt
 from ._mpl_imports import FigureCanvasAgg
 
+from yt.config import \
+    ytcfg
 from yt.utilities.logger import ytLogger as mylog
 from .plot_window import PlotWindow
 from .profile_plotter import PhasePlot, ProfilePlot
@@ -725,7 +727,7 @@
             if plot.cmap is not None:
                 _cmap = plot.cmap.name
         if _cmap is None:
-            _cmap = 'algae'
+            _cmap = ytcfg.get("yt", "default_colormap")
         if isinstance(plot, (PlotWindow, PhasePlot)):
             if isinstance(plot, PlotWindow):
                 try:
@@ -1345,7 +1347,7 @@
     return d
 
 #=============================================================================
-def return_cmap(cmap="algae", label="", range=(0,1), log=False):
+def return_cmap(cmap=None, label="", range=(0,1), log=False):
     r"""Returns a dict that describes a colorbar.  Exclusively for use with
     multiplot.
 
@@ -1364,5 +1366,7 @@
     --------
     >>> cb = return_cmap("algae", "Density [cm$^{-3}$]", (0,10), False)
     """
+    if cmap is None:
+        cmap = ytcfg.get("yt", "default_colormap")
     return {'cmap': cmap, 'name': label, 'range': range, 'log': log}
     

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/c92814015eed/
Changeset:   c92814015eed
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:02:27+00:00
Summary:     Import from libc instead
Affected #:  1 file

diff -r 483eb61849ed48787ebef3d848f6c1378a169b79 -r c92814015eedcfc0c6e65b04d0883d6fa2bc9e8f yt/analysis_modules/ppv_cube/ppv_utils.pyx
--- a/yt/analysis_modules/ppv_cube/ppv_utils.pyx
+++ b/yt/analysis_modules/ppv_cube/ppv_utils.pyx
@@ -2,11 +2,7 @@
 cimport numpy as np
 cimport cython
 from yt.utilities.physical_constants import kboltz
-
-cdef extern from "math.h":
-    double exp(double x) nogil
-    double fabs(double x) nogil
-    double sqrt(double x) nogil
+from libc.math cimport exp, fabs, sqrt
 
 cdef double kb = kboltz.v
 cdef double pi = np.pi


https://bitbucket.org/yt_analysis/yt/commits/8fec410491bc/
Changeset:   8fec410491bc
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:03:45+00:00
Summary:     Import alloca from platform_dep.h
Affected #:  4 files

diff -r c92814015eedcfc0c6e65b04d0883d6fa2bc9e8f -r 8fec410491bc08a10f31a0c1c169d0d9b2792ff6 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -18,7 +18,7 @@
 cimport cython
 from libc.stdlib cimport malloc, free
 
-cdef extern from "stdlib.h":
+cdef extern from "platform_dep.h":
     # NOTE that size_t might not be int
     void *alloca(int)
 

diff -r c92814015eedcfc0c6e65b04d0883d6fa2bc9e8f -r 8fec410491bc08a10f31a0c1c169d0d9b2792ff6 yt/utilities/lib/basic_octree.pyx
--- a/yt/utilities/lib/basic_octree.pyx
+++ b/yt/utilities/lib/basic_octree.pyx
@@ -25,7 +25,7 @@
 
 import sys, time
 
-cdef extern from "stdlib.h":
+cdef extern from "platform_dep.h":
     # NOTE that size_t might not be int
     void *alloca(int)
 

diff -r c92814015eedcfc0c6e65b04d0883d6fa2bc9e8f -r 8fec410491bc08a10f31a0c1c169d0d9b2792ff6 yt/utilities/lib/quad_tree.pyx
--- a/yt/utilities/lib/quad_tree.pyx
+++ b/yt/utilities/lib/quad_tree.pyx
@@ -24,7 +24,7 @@
 
 from yt.utilities.exceptions import YTIntDomainOverflow
 
-cdef extern from "stdlib.h":
+cdef extern from "platform_dep.h":
     # NOTE that size_t might not be int
     void *alloca(int)
 

diff -r c92814015eedcfc0c6e65b04d0883d6fa2bc9e8f -r 8fec410491bc08a10f31a0c1c169d0d9b2792ff6 yt/utilities/spatial/ckdtree.pyx
--- a/yt/utilities/spatial/ckdtree.pyx
+++ b/yt/utilities/spatial/ckdtree.pyx
@@ -7,7 +7,7 @@
 
 import kdtree
 
-cdef extern from "stdlib.h":
+cdef extern from "platform_dep.h":
     # NOTE that size_t might not be int
     void *alloca(int)
 


https://bitbucket.org/yt_analysis/yt/commits/876b5f4c8f2f/
Changeset:   876b5f4c8f2f
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:11:08+00:00
Summary:     Implementation of tsearch
Affected #:  3 files

diff -r 8fec410491bc08a10f31a0c1c169d0d9b2792ff6 -r 876b5f4c8f2faf68f8dc6d1df39f7535be3ff0f2 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -97,7 +97,7 @@
 cdef class RAMSESOctreeContainer(SparseOctreeContainer):
     pass
 
-cdef extern from "search.h" nogil:
+cdef extern from "tsearch.h" nogil:
     void *tsearch(const void *key, void **rootp,
                     int (*compar)(const void *, const void *))
     void *tfind(const void *key, const void **rootp,

diff -r 8fec410491bc08a10f31a0c1c169d0d9b2792ff6 -r 876b5f4c8f2faf68f8dc6d1df39f7535be3ff0f2 yt/utilities/lib/tsearch.c
--- /dev/null
+++ b/yt/utilities/lib/tsearch.c
@@ -0,0 +1,113 @@
+/*
+ * Tree search generalized from Knuth (6.2.2) Algorithm T just like
+ * the AT&T man page says.
+ *
+ * The node_t structure is for internal use only, lint doesn't grok it.
+ *
+ * Written by reading the System V Interface Definition, not the code.
+ *
+ * Totally public domain.
+ */
+/*LINTLIBRARY*/
+
+#include "tsearch.h"
+#include <stdlib.h>
+
+typedef struct node_t {
+    char	  *key;
+    struct node_t *left, *right;
+} node;
+
+/* find or insert datum into search tree */
+void *
+tsearch(const void *vkey, void **vrootp,
+    int (*compar)(const void *, const void *))
+{
+    node *q;
+    char *key = (char *)vkey;
+    node **rootp = (node **)vrootp;
+
+    if (rootp == (struct node_t **)0)
+	return ((void *)0);
+    while (*rootp != (struct node_t *)0) {	/* Knuth's T1: */
+	int r;
+
+	if ((r = (*compar)(key, (*rootp)->key)) == 0)	/* T2: */
+	    return ((void *)*rootp);		/* we found it! */
+	rootp = (r < 0) ?
+	    &(*rootp)->left :		/* T3: follow left branch */
+	    &(*rootp)->right;		/* T4: follow right branch */
+    }
+    q = (node *) malloc(sizeof(node));	/* T5: key not found */
+    if (q != (struct node_t *)0) {	/* make new node */
+	*rootp = q;			/* link new node to old */
+	q->key = key;			/* initialize new node */
+	q->left = q->right = (struct node_t *)0;
+    }
+    return ((void *)q);
+}
+/* find datum in search tree */
+void *
+tfind(const void *vkey, void **vrootp,
+    int (*compar)(const void *, const void *))
+{
+
+    char *key = (char *)vkey;
+    node **rootp = (node **)vrootp;
+
+    if (rootp == (struct node_t **)0)
+	return ((void *)0);
+    while (*rootp != (struct node_t *)0) {	/* Knuth's T1: */
+	int r;
+
+	if ((r = (*compar)(key, (*rootp)->key)) == 0)	/* T2: */
+	    return ((void *)*rootp);		/* we found it! */
+	rootp = (r < 0) ?
+	    &(*rootp)->left :		/* T3: follow left branch */
+	    &(*rootp)->right;		/* T4: follow right branch */
+    }
+    return ((void *)0);	/* T5: key not found */
+}
+
+
+/* delete node with given key */
+void *
+tdelete(const void *vkey, void **vrootp,
+    int (*compar)(const void *, const void *))
+{
+    node **rootp = (node **)vrootp;
+    char *key = (char *)vkey;
+    node *p = (node *)1;
+    node *q;
+    node *r;
+    int cmp;
+
+    if (rootp == (struct node_t **)0 || *rootp == (struct node_t *)0)
+	return ((struct node_t *)0);
+    while ((cmp = (*compar)(key, (*rootp)->key)) != 0) {
+	p = *rootp;
+	rootp = (cmp < 0) ?
+	    &(*rootp)->left :		/* follow left branch */
+	    &(*rootp)->right;		/* follow right branch */
+	if (*rootp == (struct node_t *)0)
+	    return ((void *)0);		/* key not found */
+    }
+    r = (*rootp)->right;			/* D1: */
+    if ((q = (*rootp)->left) == (struct node_t *)0)	/* Left (struct node_t *)0? */
+	q = r;
+    else if (r != (struct node_t *)0) {		/* Right link is null? */
+	if (r->left == (struct node_t *)0) {	/* D2: Find successor */
+	    r->left = q;
+	    q = r;
+	} else {			/* D3: Find (struct node_t *)0 link */
+	    for (q = r->left; q->left != (struct node_t *)0; q = r->left)
+		r = q;
+	    r->left = q->right;
+	    q->left = (*rootp)->left;
+	    q->right = (*rootp)->right;
+	}
+    }
+    free((struct node_t *) *rootp);	/* D4: Free node */
+    *rootp = q;				/* link parent to new node */
+    return(p);
+}

diff -r 8fec410491bc08a10f31a0c1c169d0d9b2792ff6 -r 876b5f4c8f2faf68f8dc6d1df39f7535be3ff0f2 yt/utilities/lib/tsearch.h
--- /dev/null
+++ b/yt/utilities/lib/tsearch.h
@@ -0,0 +1,26 @@
+/*
+ * Tree search generalized from Knuth (6.2.2) Algorithm T just like
+ * the AT&T man page says.
+ *
+ * The node_t structure is for internal use only, lint doesn't grok it.
+ *
+ * Written by reading the System V Interface Definition, not the code.
+ *
+ * Totally public domain.
+ */
+/*LINTLIBRARY*/
+
+#ifndef TSEARCH_H
+#define TSEARCH_H
+
+void * tsearch(const void *vkey, void **vrootp,
+    int (*compar)(const void *, const void *));
+
+void * tfind(const void *vkey, void **vrootp,
+    int (*compar)(const void *, const void *));
+
+void * tdelete(const void *vkey, void **vrootp,
+    int (*compar)(const void *, const void *));
+
+
+#endif
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/b81200f63fb9/
Changeset:   b81200f63fb9
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:12:15+00:00
Summary:     Ignore *.pyd files on Windows
Affected #:  1 file

diff -r 876b5f4c8f2faf68f8dc6d1df39f7535be3ff0f2 -r b81200f63fb9f7e4634ab3e09608c3f3d30e822e .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -62,6 +62,7 @@
 yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
+*.pyd
 .*.swp
 *.so
 .idea/*


https://bitbucket.org/yt_analysis/yt/commits/31486edd4023/
Changeset:   31486edd4023
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:13:02+00:00
Summary:     Import alloca from platform_dep.h
Affected #:  1 file

diff -r b81200f63fb9f7e4634ab3e09608c3f3d30e822e -r 31486edd402380a02965046bcadde16a81f5cce6 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -30,7 +30,7 @@
 from cpython cimport buffer
 
 
-cdef extern from "stdlib.h":
+cdef extern from "platform_dep.h":
     # NOTE that size_t might not be int
     void *alloca(int)
 


https://bitbucket.org/yt_analysis/yt/commits/e6b5baca60fe/
Changeset:   e6b5baca60fe
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:13:58+00:00
Summary:     Define additional macros
Affected #:  1 file

diff -r 31486edd402380a02965046bcadde16a81f5cce6 -r e6b5baca60fe7077100a6ae2a86d315a30f9de25 yt/utilities/lib/platform_dep.h
--- a/yt/utilities/lib/platform_dep.h
+++ b/yt/utilities/lib/platform_dep.h
@@ -1,4 +1,4 @@
-#if defined(WIN32) || defined(WIN64)
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
 #include "malloc.h"
 #else
 #include "alloca.h"


https://bitbucket.org/yt_analysis/yt/commits/71f9c250ab80/
Changeset:   71f9c250ab80
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:15:01+00:00
Summary:     If compiling with MSVC, don't (and can't) link against lm
Affected #:  1 file

diff -r e6b5baca60fe7077100a6ae2a86d315a30f9de25 -r 71f9c250ab80d8db267324e3ea62be13598a130f setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,51 +41,56 @@
 else:
     omp_args = None
 
+if os.name == "nt":
+    libs = []
+else:
+    libs = ["m"]
 
 cython_extensions = [
     Extension("yt.analysis_modules.photon_simulator.utils",
               ["yt/analysis_modules/photon_simulator/utils.pyx"]),
     Extension("yt.analysis_modules.ppv_cube.ppv_utils",
               ["yt/analysis_modules/ppv_cube/ppv_utils.pyx"],
-              libraries=["m"]),
+              libraries=libs),
     Extension("yt.geometry.grid_visitors",
               ["yt/geometry/grid_visitors.pyx"],
               include_dirs=["yt/utilities/lib"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/grid_visitors.pxd"]),
     Extension("yt.geometry.grid_container",
               ["yt/geometry/grid_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/grid_container.pxd",
                        "yt/geometry/grid_visitors.pxd"]),
     Extension("yt.geometry.oct_container",
-              ["yt/geometry/oct_container.pyx"],
+              ["yt/geometry/oct_container.pyx",
+               "yt/utilities/lib/tsearch.c"],
               include_dirs=["yt/utilities/lib"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.oct_visitors",
               ["yt/geometry/oct_visitors.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.particle_oct_container",
               ["yt/geometry/particle_oct_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.selection_routines",
               ["yt/geometry/selection_routines.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/grid_traversal.pxd",
                        "yt/geometry/oct_container.pxd",
@@ -96,7 +101,7 @@
     Extension("yt.geometry.particle_deposit",
               ["yt/geometry/particle_deposit.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd",
@@ -104,7 +109,7 @@
     Extension("yt.geometry.particle_smooth",
               ["yt/geometry/particle_smooth.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd",
@@ -113,28 +118,30 @@
     Extension("yt.geometry.fake_octree",
               ["yt/geometry/fake_octree.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.utilities.spatial.ckdtree",
               ["yt/utilities/spatial/ckdtree.pyx"],
-              libraries=["m"]),
+              include_dirs=["yt/utilities/lib/"],
+              libraries=libs),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
-              libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"]),
+              libraries=libs, depends=["yt/utilities/lib/bitarray.pxd"]),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+              include_dirs=["yt/utilities/lib/"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
                        "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
                             "yt/geometry/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/amr_kdtools.pxd",
                        "yt/utilities/lib/grid_traversal.pxd",
@@ -144,12 +151,12 @@
               ["yt/utilities/lib/geometry_utils.pyx"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"]),
+              libraries=libs, depends=["yt/utilities/lib/fp_utils.pxd"]),
     Extension("yt.utilities.lib.marching_cubes",
               ["yt/utilities/lib/marching_cubes.pyx",
                "yt/utilities/lib/fixed_interpolator.c"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/fixed_interpolator.pxd",
                        "yt/utilities/lib/fixed_interpolator.h",
@@ -159,7 +166,7 @@
                "yt/utilities/lib/pixelization_constants.c"],
               include_dirs=["yt/utilities/lib/"],
               language="c++",
-              libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd",
+              libraries=libs, depends=["yt/utilities/lib/fp_utils.pxd",
                                         "yt/utilities/lib/pixelization_constants.h",
                                         "yt/utilities/lib/element_mappings.pxd"]),
     Extension("yt.utilities.lib.origami",
@@ -172,7 +179,7 @@
                "yt/utilities/lib/fixed_interpolator.c",
                "yt/utilities/lib/kdtree.c"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=libs,
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               depends=["yt/utilities/lib/fp_utils.pxd",
@@ -183,10 +190,10 @@
                        "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.element_mappings",
               ["yt/utilities/lib/element_mappings.pyx"],
-              libraries=["m"], depends=["yt/utilities/lib/element_mappings.pxd"]),
+              libraries=libs, depends=["yt/utilities/lib/element_mappings.pxd"]),
     Extension("yt.utilities.lib.alt_ray_tracers",
               ["yt/utilities/lib/alt_ray_tracers.pyx"],
-              libraries=["m"]),
+              libraries=libs),
 ]
 
 lib_exts = [
@@ -199,7 +206,7 @@
     cython_extensions.append(
         Extension("yt.utilities.lib.{}".format(ext_name),
                   ["yt/utilities/lib/{}.pyx".format(ext_name)],
-                  libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"]))
+                  libraries=libs, depends=["yt/utilities/lib/fp_utils.pxd"]))
 
 lib_exts = ["write_array", "ragged_arrays", "line_integral_convolution"]
 for ext_name in lib_exts:
@@ -211,7 +218,7 @@
     Extension("yt.analysis_modules.halo_finding.fof.EnzoFOF",
               ["yt/analysis_modules/halo_finding/fof/EnzoFOF.c",
                "yt/analysis_modules/halo_finding/fof/kd.c"],
-              libraries=["m"]),
+              libraries=libs),
     Extension("yt.analysis_modules.halo_finding.hop.EnzoHop",
               glob.glob("yt/analysis_modules/halo_finding/hop/*.c")),
     Extension("yt.frontends.artio._artio_caller",
@@ -229,10 +236,10 @@
               glob.glob("yt/utilities/spatial/src/*.c")),
     Extension("yt.visualization._MPL",
               ["yt/visualization/_MPL.c"],
-              libraries=["m"]),
+              libraries=libs),
     Extension("yt.utilities.data_point_utilities",
               ["yt/utilities/data_point_utilities.c"],
-              libraries=["m"]),
+              libraries=libs),
 ]
 
 # EMBREE
@@ -263,7 +270,7 @@
         conda_basedir = os.path.dirname(os.path.dirname(sys.executable))
         embree_inc_dir.append(os.path.join(conda_basedir, 'include'))
         embree_lib_dir.append(os.path.join(conda_basedir, 'lib'))
-        
+
     if _platform == "darwin":
         embree_lib_name = "embree.2"
     else:
@@ -273,7 +280,8 @@
         ext.include_dirs += embree_inc_dir
         ext.library_dirs += embree_lib_dir
         ext.language = "c++"
-        ext.libraries += ["m", embree_lib_name]
+        ext.libraries += libs
+        ext.libraries += [embree_lib_name]
 
     cython_extensions += embree_extensions
 


https://bitbucket.org/yt_analysis/yt/commits/2384221e4855/
Changeset:   2384221e4855
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:15:51+00:00
Summary:     Define more macros
Affected #:  1 file

diff -r 71f9c250ab80d8db267324e3ea62be13598a130f -r 2384221e4855b61debbd9b46f2a41bb4db8bef91 yt/utilities/lib/kdtree.c
--- a/yt/utilities/lib/kdtree.c
+++ b/yt/utilities/lib/kdtree.c
@@ -31,7 +31,7 @@
 #include <math.h>
 #include "kdtree.h"
 
-#if defined(WIN32) || defined(__WIN32__)
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
 #include <malloc.h>
 #endif
 


https://bitbucket.org/yt_analysis/yt/commits/fc8363a63b0b/
Changeset:   fc8363a63b0b
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:17:37+00:00
Summary:     Make fof compile with MSVC
Affected #:  2 files

diff -r 2384221e4855b61debbd9b46f2a41bb4db8bef91 -r fc8363a63b0b3af8bf7c4c50690258a32e4cf172 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -28,14 +28,20 @@
 Py_EnzoFOF(PyObject *obj, PyObject *args)
 {
     PyObject    *oxpos, *oypos, *ozpos;
-
     PyArrayObject    *xpos, *ypos, *zpos;
-    xpos=ypos=zpos=NULL;
     float link = 0.2;
     float fPeriod[3] = {1.0, 1.0, 1.0};
 	int nMembers = 8;
+    int i, num_particles;
+	KDFOF kd;
+	int nBucket,j;
+	float fEps;
+	int nGroup,bVerbose=1;
+	int sec,usec;
+	PyArrayObject *particle_group_id;
+    PyObject *return_value;
 
-    int i;
+    xpos=ypos=zpos=NULL;
 
     if (!PyArg_ParseTuple(args, "OOO|f(fff)i",
         &oxpos, &oypos, &ozpos, &link,
@@ -54,7 +60,7 @@
              "EnzoFOF: xpos didn't work.");
     goto _fail;
     }
-    int num_particles = PyArray_SIZE(xpos);
+    num_particles = PyArray_SIZE(xpos);
 
     ypos    = (PyArrayObject *) PyArray_FromAny(oypos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
@@ -75,12 +81,6 @@
     }
 
     /* let's get started with the FOF stuff */
-
-	KDFOF kd;
-	int nBucket,j;
-	float fEps;
-	int nGroup,bVerbose=1;
-	int sec,usec;
 	
 	/* linking length */
 	fprintf(stdout, "Link length is %f\n", link);
@@ -128,7 +128,7 @@
     // All we need to do is group information.
     
     // Tags are in kd->p[i].iGroup
-    PyArrayObject *particle_group_id = (PyArrayObject *)
+    particle_group_id = (PyArrayObject *)
             PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos),
                     PyArray_DescrFromType(NPY_INT32));
     
@@ -142,7 +142,7 @@
 
     PyArray_UpdateFlags(particle_group_id,
         NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
-    PyObject *return_value = Py_BuildValue("N", particle_group_id);
+    return_value = Py_BuildValue("N", particle_group_id);
 
     Py_DECREF(xpos);
     Py_DECREF(ypos);

diff -r 2384221e4855b61debbd9b46f2a41bb4db8bef91 -r fc8363a63b0b3af8bf7c4c50690258a32e4cf172 yt/analysis_modules/halo_finding/fof/kd.c
--- a/yt/analysis_modules/halo_finding/fof/kd.c
+++ b/yt/analysis_modules/halo_finding/fof/kd.c
@@ -1,11 +1,11 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#include <sys/time.h>
-#if defined(WIN32) || defined(WIN64) 
-#include <windows.h> 
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+#include <windows.h>
 #else
 #include <sys/resource.h>
+#include <sys/time.h>
 #endif
 #include <assert.h>
 #include "kd.h"
@@ -15,7 +15,7 @@
 void kdTimeFoF(KDFOF kd,int *puSecond,int *puMicro)
 {
 
-#if defined(WIN32) || defined(WIN64)
+#if defined(WIN32) || defined(WIN64) || defined(_WIN64)
         int secs, usecs;
         HANDLE hProcess = GetCurrentProcess();
 	FILETIME ftCreation, ftExit, ftKernel, ftUser;


https://bitbucket.org/yt_analysis/yt/commits/d9a59d7c4b98/
Changeset:   d9a59d7c4b98
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:19:26+00:00
Summary:     Make hop compile with MSVC
Affected #:  5 files

diff -r fc8363a63b0b3af8bf7c4c50690258a32e4cf172 -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -37,6 +37,7 @@
     PyArrayObject **xpos, PyArrayObject **ypos, PyArrayObject **zpos,
       PyArrayObject **mass)
 {
+    int num_particles;
 
     /* First the regular source arrays */
 
@@ -48,7 +49,7 @@
              "EnzoHop: xpos didn't work.");
     return -1;
     }
-    int num_particles = PyArray_SIZE(*xpos);
+    num_particles = PyArray_SIZE(*xpos);
 
     *ypos    = (PyArrayObject *) PyArray_FromAny(oypos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
@@ -90,19 +91,25 @@
 
     PyArrayObject    *xpos, *ypos, *zpos,
                      *mass;
-    xpos=ypos=zpos=mass=NULL;
     npy_float64 totalmass = 0.0;
     float normalize_to = 1.0;
     float thresh = 160.0;
+    int i, num_particles;
+    KD kd;
+    int nBucket = 16, kdcount = 0;
+    PyArrayObject *particle_density;
+    HC my_comm;
+    PyArrayObject *particle_group_id;
+    PyObject *return_value;
 
-    int i;
+    xpos=ypos=zpos=mass=NULL;
 
     if (!PyArg_ParseTuple(args, "OOOO|ff",
         &oxpos, &oypos, &ozpos, &omass, &thresh, &normalize_to))
     return PyErr_Format(_HOPerror,
             "EnzoHop: Invalid parameters.");
 
-    int num_particles = convert_particle_arrays(
+    num_particles = convert_particle_arrays(
             oxpos, oypos, ozpos, omass,
             &xpos, &ypos, &zpos, &mass);
     if (num_particles < 0) goto _fail;
@@ -113,8 +120,6 @@
 
   /* initialize the kd hop structure */
 
-  KD kd;
-  int nBucket = 16, kdcount = 0;
   kdInit(&kd, nBucket);
   kd->nActive = num_particles;
   kd->p = malloc(sizeof(PARTICLE)*num_particles);
@@ -124,7 +129,7 @@
   }
   
  	/* Copy positions into kd structure. */
-    PyArrayObject *particle_density = (PyArrayObject *)
+    particle_density = (PyArrayObject *)
             PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos),
                     PyArray_DescrFromType(NPY_FLOAT64));
 
@@ -137,7 +142,6 @@
     kd->totalmass = totalmass;
 	for (i = 0; i < num_particles; i++) kd->p[i].np_index = i;
 
-    HC my_comm;
     my_comm.s = newslice();
     my_comm.gl = (Grouplist*)malloc(sizeof(Grouplist));
     if(my_comm.gl == NULL) {
@@ -159,7 +163,7 @@
     // All we need to do is provide density and group information.
     
     // Tags (as per writetagsf77) are in gl.s->ntag+1 and there are gl.s->numlist of them.
-    PyArrayObject *particle_group_id = (PyArrayObject *)
+    particle_group_id = (PyArrayObject *)
             PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos),
                     PyArray_DescrFromType(NPY_INT32));
     
@@ -175,7 +179,7 @@
 
     PyArray_UpdateFlags(particle_density, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_density));
     PyArray_UpdateFlags(particle_group_id, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
-    PyObject *return_value = Py_BuildValue("NN", particle_density, particle_group_id);
+    return_value = Py_BuildValue("NN", particle_density, particle_group_id);
 
     Py_DECREF(xpos);
     Py_DECREF(ypos);
@@ -231,6 +235,8 @@
                              "nbuckets", "norm", NULL};
     PyObject    *oxpos, *oypos, *ozpos,
                 *omass;
+    npy_float64 totalmass = 0.0;
+
     self->xpos=self->ypos=self->zpos=self->mass=NULL;
 
 
@@ -257,7 +263,6 @@
             PyArray_SimpleNewFromDescr(1, PyArray_DIMS(self->xpos),
                     PyArray_DescrFromType(NPY_FLOAT64));
 
-    npy_float64 totalmass = 0.0;
     for(i= 0; i < self->num_particles; i++) {
         self->kd->p[i].np_index = i;
         *(npy_float64*)(PyArray_GETPTR1(self->densities, i)) = 0.0;
@@ -319,7 +324,8 @@
 
 static PyObject *
 kDTreeType_median_jst(kDTreeType *self, PyObject *args) {
-    int d, l, u;
+    int d, l, u, median;
+    PyObject *omedian;
 
     if (!PyArg_ParseTuple(args, "iii", &d, &l, &u))
         return PyErr_Format(_HOPerror,
@@ -337,9 +343,9 @@
         return PyErr_Format(_HOPerror,
             "kDTree.median_jst: u cannot be >= num_particles!");
 
-    int median = kdMedianJst(self->kd, d, l, u);
+    median = kdMedianJst(self->kd, d, l, u);
 
-    PyObject *omedian = PyLong_FromLong((long)median);
+    omedian = PyLong_FromLong((long)median);
     return omedian;
 }
 

diff -r fc8363a63b0b3af8bf7c4c50690258a32e4cf172 -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 yt/analysis_modules/halo_finding/hop/hop_hop.c
--- a/yt/analysis_modules/halo_finding/hop/hop_hop.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_hop.c
@@ -15,6 +15,9 @@
  
 #include <stdio.h>
 #include <stdlib.h>
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+#define _USE_MATH_DEFINES
+#endif
 #include <math.h>
 #include <string.h>
 #include <ctype.h>
@@ -551,13 +554,13 @@
 {
     int j, den;
     Boundary *hp;
- 
+    int nb = 0;
+
     my_comm->gdensity = vector(0,smx->nGroups-1);
     for (j=0;j<smx->nGroups;j++) {
         den = smx->densestingroup[j];
 	    my_comm->gdensity[j]=NP_DENS(smx->kd, den);
     }
-    int nb = 0;
     for (j=0, hp=smx->hash;j<smx->nHashLength; j++,hp++)
 	if (hp->nGroup1>=0)nb++;
     my_comm->ngroups = smx->nGroups;

diff -r fc8363a63b0b3af8bf7c4c50690258a32e4cf172 -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 yt/analysis_modules/halo_finding/hop/hop_kd.c
--- a/yt/analysis_modules/halo_finding/hop/hop_kd.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_kd.c
@@ -12,10 +12,10 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#include <sys/time.h>
-#if defined(WIN32) || defined(WIN64) 
+#if defined(WIN32) || defined(WIN64) || defined(_WIN32) || defined(_WIN64)
 #include <windows.h> 
 #else
+#include <sys/time.h>
 #include <sys/resource.h>
 #endif
 #include <assert.h>

diff -r fc8363a63b0b3af8bf7c4c50690258a32e4cf172 -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 yt/analysis_modules/halo_finding/hop/hop_regroup.c
--- a/yt/analysis_modules/halo_finding/hop/hop_regroup.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_regroup.c
@@ -411,6 +411,10 @@
     FILE *fp;
     FILE *boundfp;
     float *gdensity = my_comm->gdensity;
+    int *g1temp,*g2temp;
+    float *denstemp;
+    int temppos = 0;
+
     ngroups = my_comm->ngroups;
 
     if (densthresh<MINDENS) densthresh=MINDENS;
@@ -446,13 +450,11 @@
        the arrays should be no larger than my_comm->nb. 
        Skory.
     */
-    int *g1temp,*g2temp;
-    float *denstemp;
+
     g1temp = (int *)malloc(sizeof(int) * my_comm->nb);
     g2temp = (int *)malloc(sizeof(int) * my_comm->nb);
     denstemp = (float *)malloc(sizeof(float) * my_comm->nb);
     
-    int temppos = 0;
     for(j=0;j<(my_comm->nb);j++) {
     g1 = my_comm->g1vec[j];
     g2 = my_comm->g2vec[j];

diff -r fc8363a63b0b3af8bf7c4c50690258a32e4cf172 -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 yt/analysis_modules/halo_finding/hop/hop_smooth.c
--- a/yt/analysis_modules/halo_finding/hop/hop_smooth.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_smooth.c
@@ -15,6 +15,9 @@
  
 #include <stdio.h>
 #include <stdlib.h>
+#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+#define _USE_MATH_DEFINES
+#endif
 #include <math.h>
 #include <assert.h>
 #include "smooth.h"


https://bitbucket.org/yt_analysis/yt/commits/a9e2ae6d4825/
Changeset:   a9e2ae6d4825
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:23:04+00:00
Summary:     Define our own versions of these functions for MSVC
Affected #:  6 files

diff -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 -r a9e2ae6d4825999f850a9c65c2e17007a1e88685 yt/geometry/grid_container.pxd
--- a/yt/geometry/grid_container.pxd
+++ b/yt/geometry/grid_container.pxd
@@ -18,7 +18,6 @@
 cimport cython
 
 from libc.stdlib cimport malloc, free
-from libc.math cimport nearbyint, rint
 from yt.geometry.selection_routines cimport SelectorObject, _ensure_code
 from yt.utilities.lib.fp_utils cimport iclip
 from grid_visitors cimport GridTreeNode, GridVisitorData, \
@@ -64,3 +63,6 @@
 			 np.float64_t y,
 			 np.float64_t z,
 			 GridTreeNode *grid)
+
+cdef extern from "math_utils.h" nogil:
+    double rint(double x)
\ No newline at end of file

diff -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 -r a9e2ae6d4825999f850a9c65c2e17007a1e88685 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -1,11 +1,15 @@
 cimport cython 
 import numpy as np
 cimport numpy as np
-from libc.math cimport fabs, fmax, fmin
+from libc.math cimport fabs
 from libc.stdlib cimport malloc, free
 from cython.parallel import parallel, prange
 from vec3_ops cimport dot, subtract, cross
 
+cdef extern from "math_utils.h" nogil:
+    double fmax(double x, double y)
+    double fmin(double x, double y)
+
 cdef extern from "mesh_construction.h":
     enum:
         MAX_NUM_TRI

diff -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 -r a9e2ae6d4825999f850a9c65c2e17007a1e88685 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -18,8 +18,10 @@
 from numpy cimport ndarray
 cimport cython
 import numpy as np
-from libc.math cimport fabs, fmax
+from libc.math cimport fabs
 
+cdef extern from "math_utils.h":
+    double fmax(double x, double y) nogil
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 -r a9e2ae6d4825999f850a9c65c2e17007a1e88685 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -28,9 +28,11 @@
     double floor(double x) nogil
     double ceil(double x) nogil
     double fmod(double x, double y) nogil
+    double fabs(double x) nogil
+
+cdef extern from "math_utils.h":
     double log2(double x) nogil
     long int lrint(double x) nogil
-    double fabs(double x) nogil
 
 # Finally, miscellaneous routines.
 

diff -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 -r a9e2ae6d4825999f850a9c65c2e17007a1e88685 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -21,13 +21,16 @@
     cdef int SHRT_MAX
 from libc.stdlib cimport malloc, calloc, free, abs
 from libc.math cimport exp, floor, log2, \
-    lrint, fabs, atan, atan2, asin, cos, sin, sqrt, acos, M_PI
+    fabs, atan, atan2, asin, cos, sin, sqrt, acos, M_PI
 from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
 from field_interpolation_tables cimport \
     FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
     FIT_eval_transfer_with_light
 from fixed_interpolator cimport *
 
+cdef extern from "math_utils.h":
+    long int lrint(double x) nogil
+
 from cython.parallel import prange, parallel, threadid
 from vec3_ops cimport dot, subtract, L2_norm, fma
 

diff -r d9a59d7c4b983e22d915a53cd1bc5aa85d709be1 -r a9e2ae6d4825999f850a9c65c2e17007a1e88685 yt/utilities/lib/math_utils.h
--- /dev/null
+++ b/yt/utilities/lib/math_utils.h
@@ -0,0 +1,25 @@
+#include <math.h>
+#define isnormal(x) ((_fpclass(x) == _FPCLASS_NN) || (_fpclass(x) == _FPCLASS_PN))
+static __inline double rint(double x){
+    const double two_to_52 = 4.5035996273704960e+15;
+    double fa = fabs(x);
+    if(fa >= two_to_52){
+        return x;
+    } else{
+        return copysign(two_to_52 + fa - two_to_52, x);
+    }
+}
+static __inline long int lrint(double x){
+    return (long)rint(x);
+}
+static __inline double fmax(double x, double y){
+    return (x > y) ? x : y;
+}
+static __inline double fmin(double x, double y){
+    return (x < y) ? x : y;
+}
+static __inline double log2(double x) {
+    return log(x) * M_LOG2E;
+}
+
+


https://bitbucket.org/yt_analysis/yt/commits/e12dc3d2a9bb/
Changeset:   e12dc3d2a9bb
Branch:      yt
User:        jzuhone
Date:        2016-04-03 16:26:57+00:00
Summary:     Compile this with MSVC
Affected #:  1 file

diff -r a9e2ae6d4825999f850a9c65c2e17007a1e88685 -r e12dc3d2a9bb075930c8250fa8bb576d325b2a4f yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -46,13 +46,28 @@
             *grid_src_mask, *grid_src_wgt, *grid_used_mask;
     PyArrayObject    *grid_dst_x, *grid_dst_y, **grid_dst_vals,
             *grid_dst_mask, *grid_dst_wgt;
+    int NumArrays, src_len, dst_len, refinement_factor;
+    npy_float64 **src_vals;
+    npy_float64 **dst_vals;
+    PyObject *temp_object;
+    int i;
+
+    npy_int64 *src_x, *src_y, *src_mask, *src_used_mask;
+    npy_float64 *src_wgt;
+
+    npy_int64 *dst_x, *dst_y, *dst_mask;
+    npy_float64 *dst_wgt;
+
+    int si, di, x_off, y_off;
+    npy_int64  fine_x, fine_y, init_x, init_y;
+    int num_found = 0;
+    PyObject *onum_found;
 
     grid_src_x = grid_src_y = //grid_src_vals =
             grid_src_mask = grid_src_wgt = grid_used_mask =
     grid_dst_x = grid_dst_y = //grid_dst_vals = 
             grid_dst_mask = grid_dst_wgt = NULL;
 
-    int NumArrays, src_len, dst_len, refinement_factor;
     NumArrays = 0;
 
     if (!PyArg_ParseTuple(args, "OOOOOOOOOOiO",
@@ -154,10 +169,9 @@
 
     grid_src_vals = malloc(NumArrays * sizeof(PyArrayObject*));
     grid_dst_vals = malloc(NumArrays * sizeof(PyArrayObject*));
-    npy_float64 **src_vals = malloc(NumArrays * sizeof(npy_float64*));
-    npy_float64 **dst_vals = malloc(NumArrays * sizeof(npy_float64*));
-    PyObject *temp_object;
-    int i;
+    src_vals = malloc(NumArrays * sizeof(npy_float64*));
+    dst_vals = malloc(NumArrays * sizeof(npy_float64*));
+
     for (i = 0; i < NumArrays; i++) {
       temp_object = PySequence_GetItem(ogrid_src_vals, i);
       grid_src_vals[i] = (PyArrayObject *) PyArray_FromAny(
@@ -178,20 +192,16 @@
 
     /* Now we're all set to call our sub-function. */
 
-    npy_int64     *src_x    = (npy_int64 *) PyArray_GETPTR1(grid_src_x,0);
-    npy_int64     *src_y    = (npy_int64 *) PyArray_GETPTR1(grid_src_y,0);
-    npy_float64 *src_wgt  = (npy_float64 *) PyArray_GETPTR1(grid_src_wgt,0);
-    npy_int64     *src_mask = (npy_int64 *) PyArray_GETPTR1(grid_src_mask,0);
-    npy_int64    *src_used_mask = (npy_int64 *) PyArray_GETPTR1(grid_used_mask,0);
+    src_x    = (npy_int64 *) PyArray_GETPTR1(grid_src_x,0);
+    src_y    = (npy_int64 *) PyArray_GETPTR1(grid_src_y,0);
+    src_wgt  = (npy_float64 *) PyArray_GETPTR1(grid_src_wgt,0);
+    src_mask = (npy_int64 *) PyArray_GETPTR1(grid_src_mask,0);
+    src_used_mask = (npy_int64 *) PyArray_GETPTR1(grid_used_mask,0);
 
-    npy_int64     *dst_x    = (npy_int64 *) PyArray_GETPTR1(grid_dst_x,0);
-    npy_int64     *dst_y    = (npy_int64 *) PyArray_GETPTR1(grid_dst_y,0);
-    npy_float64 *dst_wgt  = (npy_float64 *) PyArray_GETPTR1(grid_dst_wgt,0);
-    npy_int64     *dst_mask = (npy_int64 *) PyArray_GETPTR1(grid_dst_mask,0);
-
-    int si, di, x_off, y_off;
-    npy_int64  fine_x, fine_y, init_x, init_y;
-    int num_found = 0;
+    dst_x    = (npy_int64 *) PyArray_GETPTR1(grid_dst_x,0);
+    dst_y    = (npy_int64 *) PyArray_GETPTR1(grid_dst_y,0);
+    dst_wgt  = (npy_float64 *) PyArray_GETPTR1(grid_dst_wgt,0);
+    dst_mask = (npy_int64 *) PyArray_GETPTR1(grid_dst_mask,0);
 
     for (si = 0; si < src_len; si++) {
       if (src_used_mask[si] == 0) continue;
@@ -244,7 +254,7 @@
     free(src_vals);
     free(dst_vals);
 
-    PyObject *onum_found = PYINTCONV_FROM((long)num_found);
+    onum_found = PYINTCONV_FROM((long)num_found);
     return onum_found;
 
 _fail:
@@ -298,14 +308,30 @@
              *oc_le, *oc_re, *oc_dx, *oc_data, *odr_edge, *odl_edge;
     PyArrayObject *g_le, *g_dx, *g_cm,
                   *c_le, *c_re, *c_dx, *dr_edge, *dl_edge;
-    g_dx=g_cm=c_le=c_re=c_dx=NULL;
     PyArrayObject **g_data, **c_data;
-    g_data = c_data = NULL;
     npy_int *ag_cm;
-    npy_float64 ag_le[3], ag_dx[3], 
+    npy_float64 ag_le[3], ag_dx[3],
                 ac_le[3], ac_re[3], ac_dx[3],
                 adl_edge[3], adr_edge[3];
     Py_ssize_t n_fields = 0;
+    PyObject *tc_data;
+    PyObject *tg_data;
+    npy_int64 xg, yg, zg, xc, yc, zc, cmax_x, cmax_y, cmax_z,
+              cmin_x, cmin_y, cmin_z, cm, pxl, pyl, pzl;
+    long int total=0;
+
+    int p_niter[3] = {1,1,1};
+    int itc;
+    npy_float64 ac_le_p[3][3];
+    npy_float64 ac_re_p[3][3];
+    npy_float64 ag_re[3];
+    npy_intp nx, ny, nz;
+    npy_int64 xg_min, yg_min, zg_min;
+    npy_int64 xg_max, yg_max, zg_max;
+    PyObject *status;
+
+    g_dx=g_cm=c_le=c_re=c_dx=NULL;
+    g_data = c_data = NULL;
 
     if (!PyArg_ParseTuple(args, "OOOOOOOOiOO",
             &og_le, &og_dx, &og_data, &og_cm,
@@ -411,7 +437,6 @@
       goto _fail;
     }
 
-    PyObject *tc_data;
     c_data = (PyArrayObject**)
              malloc(sizeof(PyArrayObject*)*n_fields);
     for (n=0;n<n_fields;n++)c_data[n]=NULL;
@@ -434,7 +459,6 @@
       goto _fail;
     }
 
-    PyObject *tg_data;
     g_data = (PyArrayObject**)
              malloc(sizeof(PyArrayObject*)*n_fields);
     for (n=0;n<n_fields;n++)g_data[n]=NULL;
@@ -455,15 +479,6 @@
 
     /* And let's begin */
 
-    npy_int64 xg, yg, zg, xc, yc, zc, cmax_x, cmax_y, cmax_z,
-              cmin_x, cmin_y, cmin_z, cm, pxl, pyl, pzl;
-    long int total=0;
-
-    int p_niter[3] = {1,1,1};
-    int itc;
-    npy_float64 ac_le_p[3][3];
-    npy_float64 ac_re_p[3][3];
-    npy_float64 ag_re[3];
     /* This is for checking for periodic boundary conditions.
        Manually set the right edge to be offset from the left. */
     for(i=0;i<3;i++){ag_re[i] = ag_le[i]+ag_dx[i]*(g_data[0]->dimensions[i]+1);}
@@ -481,13 +496,10 @@
             }
             p_niter[i] = itc;
     }
-    npy_intp nx, ny, nz;
     /* This is easier than doing a lookup every loop */
     nx = PyArray_DIM(c_data[0], 0);
     ny = PyArray_DIM(c_data[0], 1);
     nz = PyArray_DIM(c_data[0], 2);
-    npy_int64 xg_min, yg_min, zg_min;
-    npy_int64 xg_max, yg_max, zg_max;
 
     /* Periodic iterations, *if necessary* */
     for (pxl = 0; pxl < p_niter[0]; pxl++) {
@@ -556,7 +568,7 @@
     free(g_data);
     free(c_data);
 
-    PyObject *status = PYINTCONV_FROM(total);
+    status = PYINTCONV_FROM(total);
     return status;
     
 _fail:
@@ -626,12 +638,8 @@
              *oc_start, *og_start,
              *oc_dims, *og_dims, *omask;
     PyObject *tg_data, *tc_data, *dw_data;
-    oc_data = og_data = oc_start = og_start = oc_dims = og_dims = omask = NULL;
-    tg_data = tc_data = dw_data = NULL;
     PyArrayObject **g_data, **c_data, *mask,
                   *g_start, *c_start, *c_dims, *g_dims, *dwa;
-    mask = g_start = c_start = c_dims = g_dims = NULL;
-    g_data = c_data = NULL;
     int refratio, ll, direction, n;
     npy_int64 gxs, gys, gzs, gxe, gye, gze;
     npy_int64 cxs, cys, czs, cxe, cye, cze;
@@ -639,13 +647,22 @@
     npy_int64 gxi, gyi, gzi, cxi, cyi, czi;
     npy_int64 cdx, cdy, cdz;
     npy_int64 dw[3];
-    int i;
+    int i, n_fields;
     npy_int64 ci, cj, ck, ri, rj, rk;
     int total = 0;
+    PyObject *status;
+
     void (*to_call)(PyArrayObject* c_data, npy_int64 xc,
                          npy_int64 yc, npy_int64 zc,
                     PyArrayObject* g_data, npy_int64 xg,
                          npy_int64 yg, npy_int64 zg);
+
+    oc_data = og_data = oc_start = og_start = oc_dims = og_dims = omask = NULL;
+    tg_data = tc_data = dw_data = NULL;
+
+    mask = g_start = c_start = c_dims = g_dims = NULL;
+    g_data = c_data = NULL;
+
     if (!PyArg_ParseTuple(args, "iOOOOOOOOii",
             &refratio, &og_start, &oc_start,
             &oc_data, &og_data,
@@ -699,7 +716,7 @@
     }
     for (i=0;i<3;i++)dw[i] = *(npy_int64*) PyArray_GETPTR1(dwa, i);
 
-    int n_fields = PyList_Size(oc_data);
+    n_fields = PyList_Size(oc_data);
     if(n_fields == 0) {
       /*PyErr_Format(_dataCubeError,
           "CombineGrids: Length zero for c_data is invalid.");
@@ -806,7 +823,7 @@
     }
     free(g_data);
     free(c_data);
-    PyObject *status = PYINTCONV_FROM(total);
+    status = PYINTCONV_FROM(total);
     return status;
 
 _fail:
@@ -830,11 +847,8 @@
              *oc_start, *og_start,
              *oc_dims, *og_dims, *omask, *odls;
     PyObject *tg_data, *tc_data, *dw_data;
-    oc_data = og_data = oc_start = og_start = oc_dims = og_dims = omask = NULL;
-    tg_data = tc_data = dw_data = odls = NULL;
     PyArrayObject **g_data, **c_data, *mask,
                   *g_start, *c_start, *c_dims, *g_dims, *dwa;
-    mask = g_start = c_start = c_dims = g_dims = NULL;
     double *dls = NULL;
     int refratio, ll, direction, n;
     npy_int64 gxs, gys, gzs, gxe, gye, gze;
@@ -843,9 +857,17 @@
     npy_int64 gxi, gyi, gzi, cxi, cyi, czi;
     npy_int64 cdx, cdy, cdz;
     npy_int64 dw[3];
-    int i, axis;
+    int i, axis, n_fields;
     int ci, cj, ck, ri, rj, rk;
     int total = 0;
+    PyObject *temp = NULL;
+    int x_loc, y_loc; // For access into the buffer
+    PyObject *status;
+
+    oc_data = og_data = oc_start = og_start = oc_dims = og_dims = omask = NULL;
+    tg_data = tc_data = dw_data = odls = NULL;
+
+    mask = g_start = c_start = c_dims = g_dims = NULL;
 
     if (!PyArg_ParseTuple(args, "iOOOOOOOOOi",
             &refratio, &og_start, &oc_start,
@@ -897,7 +919,7 @@
     }
     for (i=0;i<3;i++)dw[i] = *(npy_int64*) PyArray_GETPTR1(dwa, i);
 
-    int n_fields = PyList_Size(oc_data);
+    n_fields = PyList_Size(oc_data);
     if(n_fields == 0) {
       PyErr_Format(_dataCubeError,
           "CombineGrids: Length zero for c_data is invalid.");
@@ -919,7 +941,6 @@
     g_data = (PyArrayObject**)
              malloc(sizeof(PyArrayObject*)*n_fields);
     dls = (double *) malloc(sizeof(double) * n_fields);
-    PyObject *temp = NULL;
     for (n=0;n<n_fields;n++)c_data[n]=g_data[n]=NULL;
     for (n=0;n<n_fields;n++){
       /* Borrowed reference ... */
@@ -974,7 +995,6 @@
     /* It turns out that C89 doesn't define a mechanism for choosing the sign
        of the remainder.
     */
-    int x_loc, y_loc; // For access into the buffer
     for(cxi=cxs;cxi<=cxe;cxi++) {
         ci = (cxi % dw[0]);
         ci = (ci < 0) ? ci + dw[0] : ci;
@@ -1020,7 +1040,7 @@
     if(dls!=NULL)free(dls);
     if(g_data!=NULL)free(g_data);
     if(c_data!=NULL)free(c_data);
-    PyObject *status = PYINTCONV_FROM(total);
+    status = PYINTCONV_FROM(total);
     return status;
 
 _fail:
@@ -1048,9 +1068,11 @@
 {
     PyObject *ocon_ids, *oxi, *oyi, *ozi;
     PyArrayObject *con_ids, *xi, *yi, *zi;
-    xi=yi=zi=con_ids=NULL;
     npy_int64 i, j, k, n;
     int status;
+    PyObject *retval;
+
+    xi=yi=zi=con_ids=NULL;
 
     i = 0;
     if (!PyArg_ParseTuple(args, "OOOO",
@@ -1107,7 +1129,7 @@
     Py_DECREF(yi);
     Py_DECREF(zi);
 
-    PyObject *retval = PYINTCONV_FROM(status);
+    retval = PYINTCONV_FROM(status);
     return retval;
 
     _fail:
@@ -1125,10 +1147,10 @@
   int spawn_check, status;
   int mi, mj, mk;
   static int stack_depth;
+  npy_int64 *fd_off, *fd_ijk;
   if (first == 1) stack_depth = 0;
   else stack_depth++;
   if (stack_depth > 10000) return -1;
-  npy_int64 *fd_off, *fd_ijk;
   mi = con_ids->dimensions[0];
   mj = con_ids->dimensions[1];
   mk = con_ids->dimensions[2];
@@ -1264,10 +1286,23 @@
 {
     PyObject *omass, *ox, *oy, *oz;
     PyArrayObject *mass, *x, *y, *z;
-    x=y=z=mass=NULL;
     int truncate;
     double kinetic_energy;
 
+    int q_outer, q_inner, n_q;
+    double this_potential, total_potential;
+    npy_float64 mass_o, x_o, y_o, z_o;
+    npy_float64 mass_i, x_i, y_i, z_i;
+
+    /* progress bar stuff */
+    float totalWork;
+    float workDone;
+    int every_cells;
+    int until_output;
+    PyObject *status;
+
+    x=y=z=mass=NULL;
+
     if (!PyArg_ParseTuple(args, "OOOOid",
         &omass, &ox, &oy, &oz, &truncate, &kinetic_energy))
         return PyErr_Format(_findBindingEnergyError,
@@ -1313,17 +1348,14 @@
     }
 
     /* Do the work here. */
-    int q_outer, q_inner, n_q = PyArray_SIZE(mass);
-    double this_potential, total_potential;
+    q_outer, q_inner, n_q = PyArray_SIZE(mass);
     total_potential = 0;
-    npy_float64 mass_o, x_o, y_o, z_o;
-    npy_float64 mass_i, x_i, y_i, z_i;
 
     /* progress bar stuff */
-    float totalWork = 0.5 * (pow(n_q,2.0) - n_q);
-    float workDone = 0;
-    int every_cells = floor(n_q / 100);
-    int until_output = 1;
+    totalWork = 0.5 * (pow(n_q,2.0) - n_q);
+    workDone = 0;
+    every_cells = floor(n_q / 100);
+    until_output = 1;
     for (q_outer = 0; q_outer < n_q - 1; q_outer++) {
         this_potential = 0;
         mass_o = *(npy_float64*) PyArray_GETPTR1(mass, q_outer);
@@ -1361,7 +1393,7 @@
     Py_DECREF(x);
     Py_DECREF(y);
     Py_DECREF(z);
-    PyObject *status = PyFloat_FromDouble(total_potential);
+    status = PyFloat_FromDouble(total_potential);
     return status;
 
     _fail:
@@ -1381,6 +1413,7 @@
     PyArrayObject *array;
     char *filename, *header = NULL;
     npy_intp i, j, imax, jmax;
+    FILE *to_write;
 
     if (!PyArg_ParseTuple(args, "Os|s", &oarray, &filename, &header))
         return PyErr_Format(_outputFloatsToFileError,
@@ -1395,7 +1428,7 @@
     goto _fail;
     }
 
-    FILE *to_write = fopen(filename, "w");
+    to_write = fopen(filename, "w");
     if(to_write == NULL){
     PyErr_Format(_outputFloatsToFileError,
              "OutputFloatsToFile: Unable to open %s for writing.", filename);


https://bitbucket.org/yt_analysis/yt/commits/188a780a6549/
Changeset:   188a780a6549
Branch:      yt
User:        jzuhone
Date:        2016-04-03 17:04:10+00:00
Summary:     Make this compile with MSVC
Affected #:  1 file

diff -r e12dc3d2a9bb075930c8250fa8bb576d325b2a4f -r 188a780a6549471720bf1143285c158ce7a91e7e yt/visualization/_MPL.c
--- a/yt/visualization/_MPL.c
+++ b/yt/visualization/_MPL.c
@@ -41,14 +41,26 @@
 
   PyObject *xp, *yp, *dxp, *dyp, *dp;
   PyArrayObject *x, *y, *dx, *dy, *d;
-  xp = yp = dxp = dyp = dp = NULL;
-  x = y = dx = dy = d = NULL;
   unsigned int rows, cols;
   int antialias = 1;
   double x_min, x_max, y_min, y_max;
   double period_x, period_y;
+  int check_period = 1, nx;
+  int i, j, p, xi, yi;
+  double lc, lr, rc, rr;
+  double lypx, rypx, lxpx, rxpx, overlap1, overlap2;
+  npy_float64 oxsp, oysp, xsp, ysp, dxsp, dysp, dsp;
+  int xiter[2], yiter[2];
+  double xiterv[2], yiterv[2];
+  npy_intp dims[2];
+  PyArrayObject *my_array;
+  double width, height, px_dx, px_dy, ipx_dx, ipx_dy;
+  PyObject *return_value;
+
+  xp = yp = dxp = dyp = dp = NULL;
+  x = y = dx = dy = d = NULL;
+
   period_x = period_y = 0;
-  int check_period = 1;
 
   if (!PyArg_ParseTuple(args, "OOOOOII(dddd)|i(dd)i",
       &xp, &yp, &dxp, &dyp, &dp, &cols, &rows,
@@ -56,12 +68,12 @@
       &antialias, &period_x, &period_y, &check_period))
       return PyErr_Format(_pixelizeError, "Pixelize: Invalid Parameters.");
 
-  double width = x_max - x_min;
-  double height = y_max - y_min;
-  double px_dx = width / ((double) rows);
-  double px_dy = height / ((double) cols);
-  double ipx_dx = 1.0 / px_dx;
-  double ipx_dy = 1.0 / px_dy;
+  width = x_max - x_min;
+  height = y_max - y_min;
+  px_dx = width / ((double) rows);
+  px_dy = height / ((double) cols);
+  ipx_dx = 1.0 / px_dx;
+  ipx_dy = 1.0 / px_dy;
 
   // Check we have something to output to
   if (rows == 0 || cols ==0)
@@ -103,20 +115,13 @@
   }
 
   // Check dimensions match
-  int nx = PyArray_DIMS(x)[0];
+  nx = PyArray_DIMS(x)[0];
 
   // Calculate the pointer arrays to map input x to output x
-  int i, j, p, xi, yi;
-  double lc, lr, rc, rr;
-  double lypx, rypx, lxpx, rxpx, overlap1, overlap2;
-  npy_float64 oxsp, oysp, xsp, ysp, dxsp, dysp, dsp;
-  int xiter[2], yiter[2];
-  double xiterv[2], yiterv[2];
 
-  
-
-  npy_intp dims[] = {rows, cols};
-  PyArrayObject *my_array =
+  dims[0] = rows;
+  dims[1] = cols;
+  my_array =
     (PyArrayObject *) PyArray_SimpleNewFromDescr(2, dims,
               PyArray_DescrFromType(NPY_FLOAT64));
   //npy_float64 *gridded = (npy_float64 *) my_array->data;
@@ -189,7 +194,7 @@
   Py_DECREF(dx);
   Py_DECREF(dy);
 
-  PyObject *return_value = Py_BuildValue("N", my_array);
+  return_value = Py_BuildValue("N", my_array);
 
   return return_value;
 
@@ -209,28 +214,54 @@
   PyObject *xp, *yp, *zp, *pxp, *pyp,
            *dxp, *dyp, *dzp, *dp,
            *centerp, *inv_matp, *indicesp;
+  PyArrayObject *x, *y, *z, *px, *py, *d,
+                *dx, *dy, *dz, *center, *inv_mat, *indices;
+  unsigned int rows, cols;
+  double px_min, px_max, py_min, py_max;
+  double width, height;
+  long double px_dx, px_dy;
+  int i, j, p, nx;
+  int lc, lr, rc, rr;
+  long double md, cxpx, cypx;
+  long double cx, cy, cz;
+  npy_float64 *centers;
+  npy_intp *dims;
+
+  PyArrayObject *my_array;
+  npy_float64 *gridded;
+  npy_float64 *mask;
+
+  int pp;
+
+  npy_float64 inv_mats[3][3];
+
+  npy_float64 xsp;
+  npy_float64 ysp;
+  npy_float64 zsp;
+  npy_float64 pxsp;
+  npy_float64 pysp;
+  npy_float64 dxsp;
+  npy_float64 dysp;
+  npy_float64 dzsp;
+  npy_float64 dsp;
+
+  PyObject *return_value;
 
   xp = yp = zp = pxp = pyp = dxp = dyp = dzp = dp = NULL;
   centerp = inv_matp = indicesp = NULL;
 
-  PyArrayObject *x, *y, *z, *px, *py, *d,
-                *dx, *dy, *dz, *center, *inv_mat, *indices;
-
   x = y = z = px = py = dx = dy = dz = d = NULL;
   center = inv_mat = indices = NULL;
 
-  unsigned int rows, cols;
-  double px_min, px_max, py_min, py_max;
-
     if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOII(dddd)",
         &xp, &yp, &zp, &pxp, &pyp, &dxp, &dyp, &dzp, &centerp, &inv_matp,
         &indicesp, &dp, &cols, &rows, &px_min, &px_max, &py_min, &py_max))
         return PyErr_Format(_pixelizeError, "CPixelize: Invalid Parameters.");
 
-  double width = px_max - px_min;
-  double height = py_max - py_min;
-  long double px_dx = width / ((double) rows);
-  long double px_dy = height / ((double) cols);
+  width = px_max - px_min;
+  height = py_max - py_min;
+  px_dx = width / ((double) rows);
+  px_dy = height / ((double) cols);
 
   // Check we have something to output to
   if (rows == 0 || cols ==0)
@@ -317,41 +348,36 @@
   }
 
   // Check dimensions match
-  int nx = PyArray_DIMS(x)[0];
+  nx = PyArray_DIMS(x)[0];
 
   // Calculate the pointer arrays to map input x to output x
-  int i, j, p;
-  int lc, lr, rc, rr;
-  long double md, cxpx, cypx;
-  long double cx, cy, cz;
 
-  npy_float64 *centers = (npy_float64 *) PyArray_GETPTR1(center,0);
+  centers = (npy_float64 *) PyArray_GETPTR1(center,0);
 
-  npy_intp dims[] = {rows, cols};
-  PyArrayObject *my_array =
+  dims[0] = rows;
+  dims[1] = cols;
+  my_array =
     (PyArrayObject *) PyArray_SimpleNewFromDescr(2, dims,
               PyArray_DescrFromType(NPY_FLOAT64));
-  npy_float64 *gridded = (npy_float64 *) PyArray_DATA(my_array);
-  npy_float64 *mask = malloc(sizeof(npy_float64)*rows*cols);
+  gridded = (npy_float64 *) PyArray_DATA(my_array);
+  mask = malloc(sizeof(npy_float64)*rows*cols);
 
-  npy_float64 inv_mats[3][3];
   for(i=0;i<3;i++)for(j=0;j<3;j++)
       inv_mats[i][j]=*(npy_float64*)PyArray_GETPTR2(inv_mat,i,j);
 
-  int pp;
   for(p=0;p<cols*rows;p++)gridded[p]=mask[p]=0.0;
   for(pp=0; pp<nx; pp++)
   {
     p = *((npy_int64 *) PyArray_GETPTR1(indices, pp));
-    npy_float64 xsp = *((npy_float64 *) PyArray_GETPTR1(x, p));
-    npy_float64 ysp = *((npy_float64 *) PyArray_GETPTR1(y, p));
-    npy_float64 zsp = *((npy_float64 *) PyArray_GETPTR1(z, p));
-    npy_float64 pxsp = *((npy_float64 *) PyArray_GETPTR1(px, p));
-    npy_float64 pysp = *((npy_float64 *) PyArray_GETPTR1(py, p));
-    npy_float64 dxsp = *((npy_float64 *) PyArray_GETPTR1(dx, p));
-    npy_float64 dysp = *((npy_float64 *) PyArray_GETPTR1(dy, p));
-    npy_float64 dzsp = *((npy_float64 *) PyArray_GETPTR1(dz, p));
-    npy_float64 dsp = *((npy_float64 *) PyArray_GETPTR1(d, p)); // We check this above
+    xsp = *((npy_float64 *) PyArray_GETPTR1(x, p));
+    ysp = *((npy_float64 *) PyArray_GETPTR1(y, p));
+    zsp = *((npy_float64 *) PyArray_GETPTR1(z, p));
+    pxsp = *((npy_float64 *) PyArray_GETPTR1(px, p));
+    pysp = *((npy_float64 *) PyArray_GETPTR1(py, p));
+    dxsp = *((npy_float64 *) PyArray_GETPTR1(dx, p));
+    dysp = *((npy_float64 *) PyArray_GETPTR1(dy, p));
+    dzsp = *((npy_float64 *) PyArray_GETPTR1(dz, p));
+    dsp = *((npy_float64 *) PyArray_GETPTR1(d, p)); // We check this above
     // Any point we want to plot is at most this far from the center
     md = 2.0*sqrtl(dxsp*dxsp + dysp*dysp + dzsp*dzsp);
     if(((pxsp+md<px_min) ||
@@ -395,7 +421,7 @@
   Py_DECREF(inv_mat);
   free(mask);
 
-  PyObject *return_value = Py_BuildValue("N", my_array);
+  return_value = Py_BuildValue("N", my_array);
 
   return return_value;
 


https://bitbucket.org/yt_analysis/yt/commits/21740d4ac68b/
Changeset:   21740d4ac68b
Branch:      yt
User:        jzuhone
Date:        2016-04-03 17:10:35+00:00
Summary:     Add a comment showing the origin
Affected #:  1 file

diff -r 188a780a6549471720bf1143285c158ce7a91e7e -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 yt/utilities/lib/math_utils.h
--- a/yt/utilities/lib/math_utils.h
+++ b/yt/utilities/lib/math_utils.h
@@ -1,3 +1,4 @@
+/* Taken from http://siliconandlithium.blogspot.com/2014/05/msvc-c99-mathh-header.html */
 #include <math.h>
 #define isnormal(x) ((_fpclass(x) == _FPCLASS_NN) || (_fpclass(x) == _FPCLASS_PN))
 static __inline double rint(double x){


https://bitbucket.org/yt_analysis/yt/commits/5efd466517cf/
Changeset:   5efd466517cf
Branch:      yt
User:        jzuhone
Date:        2016-04-03 18:46:37+00:00
Summary:     Fix ARTIO for MSVC, and stick everything into platform_dep.h
Affected #:  20 files

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -11,15 +11,16 @@
 from yt.geometry.oct_visitors cimport Oct
 from yt.geometry.particle_deposit cimport \
     ParticleDepositOperation
-from libc.stdint cimport int32_t, int64_t
 from libc.stdlib cimport malloc, free
 from libc.string cimport memcpy
 import data_structures
 from yt.utilities.lib.misc_utilities import OnceIndirect
 
 cdef extern from "platform_dep.h":
+    ctypedef int int32_t
+    ctypedef long long int64_t
     void *alloca(int)
-    
+
 cdef extern from "cosmology.h":
     ctypedef struct CosmologyParameters "CosmologyParameters" :
         pass

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio.c
--- a/yt/frontends/artio/artio_headers/artio.c
+++ b/yt/frontends/artio/artio_headers/artio.c
@@ -24,10 +24,16 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include <stdint.h>
 #include <string.h>
 #include <math.h>
 
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
+#include <stdint.h>
+#endif
+
 artio_fileset *artio_fileset_allocate( char *file_prefix, int mode,
 		const artio_context *context );
 void artio_fileset_destroy( artio_fileset *handle );

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio.h
--- a/yt/frontends/artio/artio_headers/artio.h
+++ b/yt/frontends/artio/artio_headers/artio.h
@@ -30,13 +30,11 @@
 #include <mpi.h>
 #endif
 
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
-#ifndef int64_t
-#ifdef _WIN32
-typedef __int64 int64_t;
-#endif
-#else
-#error "Undefined int64_t!"
 #endif
 
 #define ARTIO_OPEN_HEADER					0

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_endian.c
--- a/yt/frontends/artio/artio_headers/artio_endian.c
+++ b/yt/frontends/artio/artio_headers/artio_endian.c
@@ -22,7 +22,12 @@
 
 #include "artio_endian.h"
 
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
+#endif
 
 void artio_int_swap(int32_t *src, int count) {
 	int i;

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_endian.h
--- a/yt/frontends/artio/artio_headers/artio_endian.h
+++ b/yt/frontends/artio/artio_headers/artio_endian.h
@@ -23,7 +23,12 @@
 #ifndef __ARTIO_EDIAN_H__
 #define __ARTIO_EDIAN_H__
 
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
+#endif
 
 void artio_int_swap(int32_t *src, int count);
 void artio_float_swap(float *src, int count);

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_grid.c
--- a/yt/frontends/artio/artio_headers/artio_grid.c
+++ b/yt/frontends/artio/artio_headers/artio_grid.c
@@ -25,8 +25,14 @@
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <math.h>
+
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
-#include <math.h>
+#endif
 
 int artio_grid_find_file(artio_grid_file *ghandle, int start, int end, int64_t sfc);
 artio_grid_file *artio_grid_file_allocate(void);

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_internal.h
--- a/yt/frontends/artio/artio_headers/artio_internal.h
+++ b/yt/frontends/artio/artio_headers/artio_internal.h
@@ -28,8 +28,14 @@
 #endif
 
 #include <stdlib.h>
+#include <limits.h>
+
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
-#include <limits.h>
+#endif
 
 #include "artio_endian.h"
 

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_mpi.c
--- a/yt/frontends/artio/artio_headers/artio_mpi.c
+++ b/yt/frontends/artio/artio_headers/artio_mpi.c
@@ -28,7 +28,13 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
+#endif
 
 artio_context artio_context_global_struct = { MPI_COMM_WORLD };
 const artio_context *artio_context_global = &artio_context_global_struct;

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_parameter.c
--- a/yt/frontends/artio/artio_headers/artio_parameter.c
+++ b/yt/frontends/artio/artio_headers/artio_parameter.c
@@ -25,8 +25,13 @@
 
 #include <stdlib.h>
 #include <stdio.h>
+#include <string.h>
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
-#include <string.h>
+#endif
 
 size_t artio_type_size(int type) {
 	size_t t_len=0;

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_particle.c
--- a/yt/frontends/artio/artio_headers/artio_particle.c
+++ b/yt/frontends/artio/artio_headers/artio_particle.c
@@ -26,7 +26,12 @@
 #include <math.h>
 #include <stdio.h>
 #include <stdlib.h>
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
+#endif
 
 int artio_particle_find_file(artio_particle_file *phandle, int start, int end, int64_t sfc);
 artio_particle_file *artio_particle_file_allocate(void);

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_posix.c
--- a/yt/frontends/artio/artio_headers/artio_posix.c
+++ b/yt/frontends/artio/artio_headers/artio_posix.c
@@ -28,8 +28,13 @@
 #include <stdio.h>
 #include <string.h>
 #include <stdlib.h>
+#include <assert.h>
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
 #include <stdint.h>
-#include <assert.h>
+#endif
 
 struct ARTIO_FH {
 	FILE *fh;
@@ -51,13 +56,14 @@
 const artio_context *artio_context_global = &artio_context_global_struct;
 
 artio_fh *artio_file_fopen_i( char * filename, int mode, const artio_context *not_used ) {
+	artio_fh *ffh;
 	/* check for invalid combination of mode parameter */
 	if ( ( mode & ARTIO_MODE_READ && mode & ARTIO_MODE_WRITE ) ||
 			!( mode & ARTIO_MODE_READ || mode & ARTIO_MODE_WRITE ) ) {
 		return NULL;
 	}
 
-	artio_fh *ffh = (artio_fh *)malloc(sizeof(artio_fh));
+	ffh = (artio_fh *)malloc(sizeof(artio_fh));
 	if ( ffh == NULL ) {
 		return NULL;
 	}

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/artio_selector.c
--- a/yt/frontends/artio/artio_headers/artio_selector.c
+++ b/yt/frontends/artio/artio_headers/artio_selector.c
@@ -23,10 +23,15 @@
 #include "artio.h"
 #include "artio_internal.h"
 
-#include <stdint.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <math.h>
+#if defined(_WIN32) || defined(_WIN64)
+typedef __int64 int64_t;
+typedef __int32 int32_t;
+#else
+#include <stdint.h>
+#endif
 
 #define ARTIO_SELECTION_LIST_SIZE		1024
 #define ARTIO_SELECTION_VOLUME_LIMIT	(1L<<60)

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/frontends/artio/artio_headers/cosmology.c
--- a/yt/frontends/artio/artio_headers/cosmology.c
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -216,6 +216,11 @@
   f[3] = 1.5*c->OmegaM*y[2]/mu;
 }
 
+#if defined(_WIN32) || defined(_WIN64)
+double asinh(double x){
+    return log(x + sqrt((x * x) + 1.0));
+}
+#endif
 
 void cosmology_fill_table_piece(CosmologyParameters *c, int istart, int n)
 {
@@ -444,6 +449,7 @@
 double cosmology_get_value_from_table(CosmologyParameters *c, double a, double table[])
 {
   // This is special case code for boundary conditions
+  int idx;
   double la = log10(a);
   if (fabs(la - c->la[c->size-1]) < 1.0e-14) {
     return table[c->size-1];
@@ -451,7 +457,7 @@
     return table[0];
   }
 
-  int idx = (int)(c->ndex*(la-c->la[0]));
+  idx = (int)(c->ndex*(la-c->la[0]));
 
   // Note that because we do idx+1 below, we need -1 here.
   ASSERT(idx>=0 && (idx<c->size-1));

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/geometry/grid_container.pxd
--- a/yt/geometry/grid_container.pxd
+++ b/yt/geometry/grid_container.pxd
@@ -64,5 +64,5 @@
 			 np.float64_t z,
 			 GridTreeNode *grid)
 
-cdef extern from "math_utils.h" nogil:
+cdef extern from "platform_dep.h" nogil:
     double rint(double x)
\ No newline at end of file

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -6,7 +6,7 @@
 from cython.parallel import parallel, prange
 from vec3_ops cimport dot, subtract, cross
 
-cdef extern from "math_utils.h" nogil:
+cdef extern from "platform_dep.h" nogil:
     double fmax(double x, double y)
     double fmin(double x, double y)
 

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -20,7 +20,7 @@
 import numpy as np
 from libc.math cimport fabs
 
-cdef extern from "math_utils.h":
+cdef extern from "platform_dep.h":
     double fmax(double x, double y) nogil
 
 @cython.boundscheck(False)

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -30,7 +30,7 @@
     double fmod(double x, double y) nogil
     double fabs(double x) nogil
 
-cdef extern from "math_utils.h":
+cdef extern from "platform_dep.h":
     double log2(double x) nogil
     long int lrint(double x) nogil
 

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -28,7 +28,7 @@
     FIT_eval_transfer_with_light
 from fixed_interpolator cimport *
 
-cdef extern from "math_utils.h":
+cdef extern from "platform_dep.h":
     long int lrint(double x) nogil
 
 from cython.parallel import prange, parallel, threadid

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/utilities/lib/math_utils.h
--- a/yt/utilities/lib/math_utils.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Taken from http://siliconandlithium.blogspot.com/2014/05/msvc-c99-mathh-header.html */
-#include <math.h>
-#define isnormal(x) ((_fpclass(x) == _FPCLASS_NN) || (_fpclass(x) == _FPCLASS_PN))
-static __inline double rint(double x){
-    const double two_to_52 = 4.5035996273704960e+15;
-    double fa = fabs(x);
-    if(fa >= two_to_52){
-        return x;
-    } else{
-        return copysign(two_to_52 + fa - two_to_52, x);
-    }
-}
-static __inline long int lrint(double x){
-    return (long)rint(x);
-}
-static __inline double fmax(double x, double y){
-    return (x > y) ? x : y;
-}
-static __inline double fmin(double x, double y){
-    return (x < y) ? x : y;
-}
-static __inline double log2(double x) {
-    return log(x) * M_LOG2E;
-}
-
-

diff -r 21740d4ac68b9131b9956c46e0ce9c0348e6c465 -r 5efd466517cffe300ddf55a9dc0388f37008f3cb yt/utilities/lib/platform_dep.h
--- a/yt/utilities/lib/platform_dep.h
+++ b/yt/utilities/lib/platform_dep.h
@@ -1,5 +1,34 @@
-#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
-#include "malloc.h"
-#else
-#include "alloca.h"
-#endif
\ No newline at end of file
+/* Taken from http://siliconandlithium.blogspot.com/2014/05/msvc-c99-mathh-header.html */
+#include <math.h>
+#if defined(_WIN32) || defined(_WIN64)
+#include "malloc.h"
+typedef int int32_t;
+typedef long long int64_t;
+#define isnormal(x) ((_fpclass(x) == _FPCLASS_NN) || (_fpclass(x) == _FPCLASS_PN))
+static __inline double rint(double x){
+    const double two_to_52 = 4.5035996273704960e+15;
+    double fa = fabs(x);
+    if(fa >= two_to_52){
+        return x;
+    } else{
+        return copysign(two_to_52 + fa - two_to_52, x);
+    }
+}
+static __inline long int lrint(double x){
+    return (long)rint(x);
+}
+static __inline double fmax(double x, double y){
+    return (x > y) ? x : y;
+}
+static __inline double fmin(double x, double y){
+    return (x < y) ? x : y;
+}
+static __inline double log2(double x) {
+    return log(x) * M_LOG2E;
+}
+#else
+#include <stdint.h>
+#include "alloca.h"
+#endif
+
+


https://bitbucket.org/yt_analysis/yt/commits/d0e55956b4c8/
Changeset:   d0e55956b4c8
Branch:      yt
User:        jzuhone
Date:        2016-04-08 14:19:31+00:00
Summary:     MSVC wants 64-bit ints
Affected #:  1 file

diff -r 5efd466517cffe300ddf55a9dc0388f37008f3cb -r d0e55956b4c8fc123e44ed7542308920ce6925de yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -133,8 +133,8 @@
         dz = np.array(np.dot(pos - front_center, -camera.unit_vectors[2]))
         # Transpose into image coords.
 
-        py = (res[0]*(dx/width[0])).astype('int')
-        px = (res[1]*(dy/width[1])).astype('int')
+        py = (res[0]*(dx/width[0])).astype('int64')
+        px = (res[1]*(dy/width[1])).astype('int64')
         return px, py, dz
 
     def __repr__(self):


https://bitbucket.org/yt_analysis/yt/commits/de59248786ee/
Changeset:   de59248786ee
Branch:      yt
User:        jzuhone
Date:        2016-04-08 14:54:12+00:00
Summary:     Make sure the connectivity is int64 for MSVC
Affected #:  2 files

diff -r d0e55956b4c8fc123e44ed7542308920ce6925de -r de59248786eefda3c142701cbaf9ba5736267a2b yt/frontends/stream/sample_data/hexahedral_mesh.py
--- a/yt/frontends/stream/sample_data/hexahedral_mesh.py
+++ b/yt/frontends/stream/sample_data/hexahedral_mesh.py
@@ -897,4 +897,4 @@
     [301,  300,  515,  504,  303,  302,  531,  520],
     [14,  13,  517,  516,  16,  15,  533,  532],
     [23,  14,  516,  300,  24,  16,  532,  302]
-    ])
+    ], dtype='int64')

diff -r d0e55956b4c8fc123e44ed7542308920ce6925de -r de59248786eefda3c142701cbaf9ba5736267a2b yt/frontends/stream/sample_data/tetrahedral_mesh.py
--- a/yt/frontends/stream/sample_data/tetrahedral_mesh.py
+++ b/yt/frontends/stream/sample_data/tetrahedral_mesh.py
@@ -3282,4 +3282,4 @@
     [356, 260, 268, 339],
     [356, 357, 346, 339],
     [356, 357, 260, 339]
-])
+], dtype='int64')


https://bitbucket.org/yt_analysis/yt/commits/4c0afb8c9266/
Changeset:   4c0afb8c9266
Branch:      yt
User:        jzuhone
Date:        2016-04-08 15:08:25+00:00
Summary:     Windows gets grumpy about the filenames specified in this way
Affected #:  1 file

diff -r de59248786eefda3c142701cbaf9ba5736267a2b -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 yt/visualization/tests/test_offaxisprojection.py
--- a/yt/visualization/tests/test_offaxisprojection.py
+++ b/yt/visualization/tests/test_offaxisprojection.py
@@ -54,7 +54,7 @@
     oap_kwargs_list = expand_keywords(oap_kwargs)
 
     # args or write_projection
-    fn = "test_%s.png"
+    fn = "test_%d.png"
 
     # kwargs for write_projection
     wp_kwargs = {}
@@ -71,11 +71,11 @@
     # test all off_axis_projection kwargs and write_projection kwargs
     # make sure they are able to be projected, then remove and try next
     # iteration
-    for oap_kwargs in oap_kwargs_list:
+    for i, oap_kwargs in enumerate(oap_kwargs_list):
         image = off_axis_projection(*oap_args, **oap_kwargs)
         for wp_kwargs in wp_kwargs_list:
-            write_projection(image, fn % oap_kwargs, **wp_kwargs)
-            yield assert_equal, os.path.exists(fn % oap_kwargs), True
+            write_projection(image, fn % i, **wp_kwargs)
+            yield assert_equal, os.path.exists(fn % i), True
 
     if tmpdir:
         os.chdir(curdir)


https://bitbucket.org/yt_analysis/yt/commits/e4fb689d67e0/
Changeset:   e4fb689d67e0
Branch:      yt
User:        jzuhone
Date:        2016-04-09 02:51:18+00:00
Summary:     Simplify the use of the "WIN" macros
Affected #:  18 files

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/analysis_modules/halo_finding/fof/kd.c
--- a/yt/analysis_modules/halo_finding/fof/kd.c
+++ b/yt/analysis_modules/halo_finding/fof/kd.c
@@ -1,7 +1,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+#ifdef _WIN32
 #include <windows.h>
 #else
 #include <sys/resource.h>
@@ -15,7 +15,7 @@
 void kdTimeFoF(KDFOF kd,int *puSecond,int *puMicro)
 {
 
-#if defined(WIN32) || defined(WIN64) || defined(_WIN64)
+#ifdef _WIN32
         int secs, usecs;
         HANDLE hProcess = GetCurrentProcess();
 	FILETIME ftCreation, ftExit, ftKernel, ftUser;

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/analysis_modules/halo_finding/hop/hop_hop.c
--- a/yt/analysis_modules/halo_finding/hop/hop_hop.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_hop.c
@@ -15,7 +15,7 @@
  
 #include <stdio.h>
 #include <stdlib.h>
-#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+#ifdef _WIN32
 #define _USE_MATH_DEFINES
 #endif
 #include <math.h>

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/analysis_modules/halo_finding/hop/hop_kd.c
--- a/yt/analysis_modules/halo_finding/hop/hop_kd.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_kd.c
@@ -12,7 +12,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#if defined(WIN32) || defined(WIN64) || defined(_WIN32) || defined(_WIN64)
+#ifdef _WIN32
 #include <windows.h> 
 #else
 #include <sys/time.h>
@@ -31,7 +31,7 @@
 void kdTime(KD kd,int *puSecond,int *puMicro)
 {
 
-#if defined(WIN32) || defined(WIN64)
+#ifdef _WIN32
         int secs, usecs;
         HANDLE hProcess = GetCurrentProcess();
 	FILETIME ftCreation, ftExit, ftKernel, ftUser;

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/analysis_modules/halo_finding/hop/hop_smooth.c
--- a/yt/analysis_modules/halo_finding/hop/hop_smooth.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_smooth.c
@@ -15,7 +15,7 @@
  
 #include <stdio.h>
 #include <stdlib.h>
-#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+#ifdef _WIN32
 #define _USE_MATH_DEFINES
 #endif
 #include <math.h>

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio.c
--- a/yt/frontends/artio/artio_headers/artio.c
+++ b/yt/frontends/artio/artio_headers/artio.c
@@ -27,7 +27,7 @@
 #include <string.h>
 #include <math.h>
 
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio.h
--- a/yt/frontends/artio/artio_headers/artio.h
+++ b/yt/frontends/artio/artio_headers/artio.h
@@ -30,7 +30,7 @@
 #include <mpi.h>
 #endif
 
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_endian.c
--- a/yt/frontends/artio/artio_headers/artio_endian.c
+++ b/yt/frontends/artio/artio_headers/artio_endian.c
@@ -22,7 +22,7 @@
 
 #include "artio_endian.h"
 
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_endian.h
--- a/yt/frontends/artio/artio_headers/artio_endian.h
+++ b/yt/frontends/artio/artio_headers/artio_endian.h
@@ -23,7 +23,7 @@
 #ifndef __ARTIO_EDIAN_H__
 #define __ARTIO_EDIAN_H__
 
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_grid.c
--- a/yt/frontends/artio/artio_headers/artio_grid.c
+++ b/yt/frontends/artio/artio_headers/artio_grid.c
@@ -27,7 +27,7 @@
 #include <stdlib.h>
 #include <math.h>
 
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_internal.h
--- a/yt/frontends/artio/artio_headers/artio_internal.h
+++ b/yt/frontends/artio/artio_headers/artio_internal.h
@@ -30,7 +30,7 @@
 #include <stdlib.h>
 #include <limits.h>
 
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_mpi.c
--- a/yt/frontends/artio/artio_headers/artio_mpi.c
+++ b/yt/frontends/artio/artio_headers/artio_mpi.c
@@ -29,7 +29,7 @@
 #include <stdio.h>
 #include <string.h>
 
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_parameter.c
--- a/yt/frontends/artio/artio_headers/artio_parameter.c
+++ b/yt/frontends/artio/artio_headers/artio_parameter.c
@@ -26,7 +26,7 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_particle.c
--- a/yt/frontends/artio/artio_headers/artio_particle.c
+++ b/yt/frontends/artio/artio_headers/artio_particle.c
@@ -26,7 +26,7 @@
 #include <math.h>
 #include <stdio.h>
 #include <stdlib.h>
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_posix.c
--- a/yt/frontends/artio/artio_headers/artio_posix.c
+++ b/yt/frontends/artio/artio_headers/artio_posix.c
@@ -29,7 +29,7 @@
 #include <string.h>
 #include <stdlib.h>
 #include <assert.h>
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else
@@ -45,7 +45,7 @@
 	int bfend;
 };
 
-#ifdef _WIN32
+#ifdef MS_WIN32
 #define FOPEN_FLAGS "b"
 #define fseek _fseeki64
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/artio_selector.c
--- a/yt/frontends/artio/artio_headers/artio_selector.c
+++ b/yt/frontends/artio/artio_headers/artio_selector.c
@@ -26,7 +26,7 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <math.h>
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/frontends/artio/artio_headers/cosmology.c
--- a/yt/frontends/artio/artio_headers/cosmology.c
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -216,7 +216,7 @@
   f[3] = 1.5*c->OmegaM*y[2]/mu;
 }
 
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 double asinh(double x){
     return log(x + sqrt((x * x) + 1.0));
 }

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/utilities/lib/kdtree.c
--- a/yt/utilities/lib/kdtree.c
+++ b/yt/utilities/lib/kdtree.c
@@ -31,7 +31,7 @@
 #include <math.h>
 #include "kdtree.h"
 
-#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
+#ifdef MS_WIN32
 #include <malloc.h>
 #endif
 

diff -r 4c0afb8c92668f46f2f8845ed8d2caea7dc23a03 -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd yt/utilities/lib/platform_dep.h
--- a/yt/utilities/lib/platform_dep.h
+++ b/yt/utilities/lib/platform_dep.h
@@ -1,9 +1,9 @@
-/* Taken from http://siliconandlithium.blogspot.com/2014/05/msvc-c99-mathh-header.html */
 #include <math.h>
-#if defined(_WIN32) || defined(_WIN64)
+#ifdef MS_WIN32
 #include "malloc.h"
 typedef int int32_t;
 typedef long long int64_t;
+/* Taken from http://siliconandlithium.blogspot.com/2014/05/msvc-c99-mathh-header.html */
 #define isnormal(x) ((_fpclass(x) == _FPCLASS_NN) || (_fpclass(x) == _FPCLASS_PN))
 static __inline double rint(double x){
     const double two_to_52 = 4.5035996273704960e+15;


https://bitbucket.org/yt_analysis/yt/commits/75b415321e1a/
Changeset:   75b415321e1a
Branch:      yt
User:        jzuhone
Date:        2016-04-09 03:14:21+00:00
Summary:     Making sure we have the right macros
Affected #:  12 files

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio.c
--- a/yt/frontends/artio/artio_headers/artio.c
+++ b/yt/frontends/artio/artio_headers/artio.c
@@ -27,7 +27,7 @@
 #include <string.h>
 #include <math.h>
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio.h
--- a/yt/frontends/artio/artio_headers/artio.h
+++ b/yt/frontends/artio/artio_headers/artio.h
@@ -30,7 +30,7 @@
 #include <mpi.h>
 #endif
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_endian.c
--- a/yt/frontends/artio/artio_headers/artio_endian.c
+++ b/yt/frontends/artio/artio_headers/artio_endian.c
@@ -22,7 +22,7 @@
 
 #include "artio_endian.h"
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_endian.h
--- a/yt/frontends/artio/artio_headers/artio_endian.h
+++ b/yt/frontends/artio/artio_headers/artio_endian.h
@@ -23,7 +23,7 @@
 #ifndef __ARTIO_EDIAN_H__
 #define __ARTIO_EDIAN_H__
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_grid.c
--- a/yt/frontends/artio/artio_headers/artio_grid.c
+++ b/yt/frontends/artio/artio_headers/artio_grid.c
@@ -27,7 +27,7 @@
 #include <stdlib.h>
 #include <math.h>
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_internal.h
--- a/yt/frontends/artio/artio_headers/artio_internal.h
+++ b/yt/frontends/artio/artio_headers/artio_internal.h
@@ -30,7 +30,7 @@
 #include <stdlib.h>
 #include <limits.h>
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_mpi.c
--- a/yt/frontends/artio/artio_headers/artio_mpi.c
+++ b/yt/frontends/artio/artio_headers/artio_mpi.c
@@ -29,7 +29,7 @@
 #include <stdio.h>
 #include <string.h>
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_parameter.c
--- a/yt/frontends/artio/artio_headers/artio_parameter.c
+++ b/yt/frontends/artio/artio_headers/artio_parameter.c
@@ -26,7 +26,7 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_particle.c
--- a/yt/frontends/artio/artio_headers/artio_particle.c
+++ b/yt/frontends/artio/artio_headers/artio_particle.c
@@ -26,7 +26,7 @@
 #include <math.h>
 #include <stdio.h>
 #include <stdlib.h>
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_posix.c
--- a/yt/frontends/artio/artio_headers/artio_posix.c
+++ b/yt/frontends/artio/artio_headers/artio_posix.c
@@ -29,7 +29,7 @@
 #include <string.h>
 #include <stdlib.h>
 #include <assert.h>
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else
@@ -45,7 +45,7 @@
 	int bfend;
 };
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 #define FOPEN_FLAGS "b"
 #define fseek _fseeki64
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/artio_selector.c
--- a/yt/frontends/artio/artio_headers/artio_selector.c
+++ b/yt/frontends/artio/artio_headers/artio_selector.c
@@ -26,7 +26,7 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <math.h>
-#ifdef MS_WIN32
+#ifdef _WIN32
 typedef __int64 int64_t;
 typedef __int32 int32_t;
 #else

diff -r e4fb689d67e0d846516cdf6659e4b24a1e85adcd -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e yt/frontends/artio/artio_headers/cosmology.c
--- a/yt/frontends/artio/artio_headers/cosmology.c
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -216,7 +216,7 @@
   f[3] = 1.5*c->OmegaM*y[2]/mu;
 }
 
-#ifdef MS_WIN32
+#ifdef _WIN32
 double asinh(double x){
     return log(x + sqrt((x * x) + 1.0));
 }


https://bitbucket.org/yt_analysis/yt/commits/209f3d0b9665/
Changeset:   209f3d0b9665
Branch:      yt
User:        jzuhone
Date:        2016-04-13 18:08:28+00:00
Summary:     rename libs --> std_libs
Affected #:  1 file

diff -r 75b415321e1af0fd1873fa9b7361cf15cf818f7e -r 209f3d0b966518dac95cff141d997d967d375ee5 setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,26 +42,26 @@
     omp_args = None
 
 if os.name == "nt":
-    libs = []
+    std_libs = []
 else:
-    libs = ["m"]
+    std_libs = ["m"]
 
 cython_extensions = [
     Extension("yt.analysis_modules.photon_simulator.utils",
               ["yt/analysis_modules/photon_simulator/utils.pyx"]),
     Extension("yt.analysis_modules.ppv_cube.ppv_utils",
               ["yt/analysis_modules/ppv_cube/ppv_utils.pyx"],
-              libraries=libs),
+              libraries=std_libs),
     Extension("yt.geometry.grid_visitors",
               ["yt/geometry/grid_visitors.pyx"],
               include_dirs=["yt/utilities/lib"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/grid_visitors.pxd"]),
     Extension("yt.geometry.grid_container",
               ["yt/geometry/grid_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/grid_container.pxd",
                        "yt/geometry/grid_visitors.pxd"]),
@@ -69,28 +69,28 @@
               ["yt/geometry/oct_container.pyx",
                "yt/utilities/lib/tsearch.c"],
               include_dirs=["yt/utilities/lib"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.oct_visitors",
               ["yt/geometry/oct_visitors.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.particle_oct_container",
               ["yt/geometry/particle_oct_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.selection_routines",
               ["yt/geometry/selection_routines.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/grid_traversal.pxd",
                        "yt/geometry/oct_container.pxd",
@@ -101,7 +101,7 @@
     Extension("yt.geometry.particle_deposit",
               ["yt/geometry/particle_deposit.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd",
@@ -109,7 +109,7 @@
     Extension("yt.geometry.particle_smooth",
               ["yt/geometry/particle_smooth.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd",
@@ -118,30 +118,30 @@
     Extension("yt.geometry.fake_octree",
               ["yt/geometry/fake_octree.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.utilities.spatial.ckdtree",
               ["yt/utilities/spatial/ckdtree.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs),
+              libraries=std_libs),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
-              libraries=libs, depends=["yt/utilities/lib/bitarray.pxd"]),
+              libraries=std_libs, depends=["yt/utilities/lib/bitarray.pxd"]),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               include_dirs=["yt/utilities/lib/"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
                        "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
                             "yt/geometry/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/amr_kdtools.pxd",
                        "yt/utilities/lib/grid_traversal.pxd",
@@ -151,12 +151,12 @@
               ["yt/utilities/lib/geometry_utils.pyx"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=libs, depends=["yt/utilities/lib/fp_utils.pxd"]),
+              libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd"]),
     Extension("yt.utilities.lib.marching_cubes",
               ["yt/utilities/lib/marching_cubes.pyx",
                "yt/utilities/lib/fixed_interpolator.c"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/fixed_interpolator.pxd",
                        "yt/utilities/lib/fixed_interpolator.h",
@@ -166,7 +166,7 @@
                "yt/utilities/lib/pixelization_constants.c"],
               include_dirs=["yt/utilities/lib/"],
               language="c++",
-              libraries=libs, depends=["yt/utilities/lib/fp_utils.pxd",
+              libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd",
                                         "yt/utilities/lib/pixelization_constants.h",
                                         "yt/utilities/lib/element_mappings.pxd"]),
     Extension("yt.utilities.lib.origami",
@@ -179,7 +179,7 @@
                "yt/utilities/lib/fixed_interpolator.c",
                "yt/utilities/lib/kdtree.c"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=libs,
+              libraries=std_libs,
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               depends=["yt/utilities/lib/fp_utils.pxd",
@@ -190,10 +190,10 @@
                        "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.element_mappings",
               ["yt/utilities/lib/element_mappings.pyx"],
-              libraries=libs, depends=["yt/utilities/lib/element_mappings.pxd"]),
+              libraries=std_libs, depends=["yt/utilities/lib/element_mappings.pxd"]),
     Extension("yt.utilities.lib.alt_ray_tracers",
               ["yt/utilities/lib/alt_ray_tracers.pyx"],
-              libraries=libs),
+              libraries=std_libs),
 ]
 
 lib_exts = [
@@ -206,7 +206,7 @@
     cython_extensions.append(
         Extension("yt.utilities.lib.{}".format(ext_name),
                   ["yt/utilities/lib/{}.pyx".format(ext_name)],
-                  libraries=libs, depends=["yt/utilities/lib/fp_utils.pxd"]))
+                  libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd"]))
 
 lib_exts = ["write_array", "ragged_arrays", "line_integral_convolution"]
 for ext_name in lib_exts:
@@ -218,7 +218,7 @@
     Extension("yt.analysis_modules.halo_finding.fof.EnzoFOF",
               ["yt/analysis_modules/halo_finding/fof/EnzoFOF.c",
                "yt/analysis_modules/halo_finding/fof/kd.c"],
-              libraries=libs),
+              libraries=std_libs),
     Extension("yt.analysis_modules.halo_finding.hop.EnzoHop",
               glob.glob("yt/analysis_modules/halo_finding/hop/*.c")),
     Extension("yt.frontends.artio._artio_caller",
@@ -236,10 +236,10 @@
               glob.glob("yt/utilities/spatial/src/*.c")),
     Extension("yt.visualization._MPL",
               ["yt/visualization/_MPL.c"],
-              libraries=libs),
+              libraries=std_libs),
     Extension("yt.utilities.data_point_utilities",
               ["yt/utilities/data_point_utilities.c"],
-              libraries=libs),
+              libraries=std_libs),
 ]
 
 # EMBREE
@@ -280,7 +280,7 @@
         ext.include_dirs += embree_inc_dir
         ext.library_dirs += embree_lib_dir
         ext.language = "c++"
-        ext.libraries += libs
+        ext.libraries += std_libs
         ext.libraries += [embree_lib_name]
 
     cython_extensions += embree_extensions


https://bitbucket.org/yt_analysis/yt/commits/d8eec89b2c86/
Changeset:   d8eec89b2c86
Branch:      yt
User:        jzuhone
Date:        2016-04-14 15:17:23+00:00
Summary:     Merge
Affected #:  185 files

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
@@ -12,4 +12,5 @@
 prune doc/source/reference/api/generated
 prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+recursive-include yt/visualization/volume_rendering/shaders *.fragmentshader *.vertexshader
 prune yt/frontends/_skeleton

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -7,12 +7,12 @@
 
 % To make this come out properly in landscape mode, do one of the following
 % 1.
-%  pdflatex latexsheet.tex
+%  pdflatex cheatsheet.tex
 %
 % 2.
-%  latex latexsheet.tex
-%  dvips -P pdf  -t landscape latexsheet.dvi
-%  ps2pdf latexsheet.ps
+%  latex cheatsheet.tex
+%  dvips -P pdf  -t landscape cheatsheet.dvi
+%  ps2pdf cheatsheet.ps
 
 
 % If you're reading this, be prepared for confusion.  Making this was
@@ -45,7 +45,7 @@
 
 % Turn off header and footer
 \pagestyle{empty}
- 
+
 
 % Redefine section commands to use less space
 \makeatletter
@@ -117,26 +117,26 @@
 including a list of the available flags.
 
 \texttt{iyt}\textemdash\ Load yt and IPython. \\
-\texttt{yt load} {\it dataset}   \textemdash\ Load a single dataset.  \\
+\texttt{yt load} \textit{dataset}   \textemdash\ Load a single dataset.  \\
 \texttt{yt help} \textemdash\ Print yt help information. \\
-\texttt{yt stats} {\it dataset} \textemdash\ Print stats of a dataset. \\
+\texttt{yt stats} \textit{dataset} \textemdash\ Print stats of a dataset. \\
 \texttt{yt update} \textemdash\ Update yt to most recent version.\\
 \texttt{yt update --all} \textemdash\ Update yt and dependencies to most recent version. \\
 \texttt{yt version} \textemdash\ yt installation information. \\
 \texttt{yt notebook} \textemdash\ Run the IPython notebook server. \\
-\texttt{yt upload\_image} {\it image.png} \textemdash\ Upload PNG image to imgur.com. \\
-\texttt{yt upload\_notebook} {\it notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
-\texttt{yt plot} {\it dataset} \textemdash\ Create a set of images.\\
-\texttt{yt render} {\it dataset} \textemdash\ Create a simple
+\texttt{yt upload\_image} \textit{image.png} \textemdash\ Upload PNG image to imgur.com. \\
+\texttt{yt upload\_notebook} \textit{notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
+\texttt{yt plot} \textit{dataset} \textemdash\ Create a set of images.\\
+\texttt{yt render} \textit{dataset} \textemdash\ Create a simple
  volume rendering. \\
-\texttt{yt mapserver} {\it dataset} \textemdash\ View a plot/projection in a Gmaps-like
+\texttt{yt mapserver} \textit{dataset} \textemdash\ View a plot/projection in a Gmaps-like
  interface. \\
-\texttt{yt pastebin} {\it text.out} \textemdash\ Post text to the pastebin at
- paste.yt-project.org. \\ 
-\texttt{yt pastebin\_grab} {\it identifier} \textemdash\ Print content of pastebin to
+\texttt{yt pastebin} \textit{text.out} \textemdash\ Post text to the pastebin at
+ paste.yt-project.org. \\
+\texttt{yt pastebin\_grab} \textit{identifier} \textemdash\ Print content of pastebin to
  STDOUT. \\
 \texttt{yt bugreport} \textemdash\ Report a yt bug. \\
-\texttt{yt hop} {\it dataset} \textemdash\  Run hop on a dataset. \\
+\texttt{yt hop} \textit{dataset} \textemdash\  Run hop on a dataset. \\
 
 \subsection{yt Imports}
 In order to use yt, Python must load the relevant yt modules into memory.
@@ -144,15 +144,15 @@
 used as part of a script.
 \newlength{\MyLen}
 \settowidth{\MyLen}{\texttt{letterpaper}/\texttt{a4paper} \ }
-\texttt{import yt}  \textemdash\ 
+\texttt{import yt}  \textemdash\
 Load yt. \\
-\texttt{from yt.config import ytcfg}  \textemdash\ 
+\texttt{from yt.config import ytcfg}  \textemdash\
 Used to set yt configuration options.
 If used, must be called before importing any other module.\\
-\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\ 
+\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\
 Load halo finding modules. Other modules
-are loaded in a similar way by swapping the 
-{\em emphasized} text.
+are loaded in a similar way by swapping the
+\emph{emphasized} text.
 See the \textbf{Analysis Modules} section for a listing and short descriptions of each.
 
 \subsection{YTArray}
@@ -163,32 +163,32 @@
 very brief list of some useful ones.
 \settowidth{\MyLen}{\texttt{multicol} }\\
 \texttt{v = a.in\_cgs()} \textemdash\ Return the array in CGS units \\
-\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\ 
+\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\
 \texttt{v = a.max(), a.min()} \textemdash\ Return maximum, minimum of \texttt{a}. \\
 \texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max,
 min value of \texttt{a}.\\
-\texttt{v = a[}{\it index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location {\it index}.\\
-\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from
+\texttt{v = a[}\textit{index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location \textit{index}.\\
+\texttt{b = a[}\textit{i:j}\texttt{]} \textemdash\ Select the slice of values from
 \texttt{a} between
-locations {\it i} to {\it j-1} saved to a new Numpy array \texttt{b} with length {\it j-i}. \\
+locations \textit{i} to \textit{j-1} saved to a new Numpy array \texttt{b} with length \textit{j-i}. \\
 \texttt{sel = (a > const)} \textemdash\ Create a new boolean Numpy array
 \texttt{sel}, of the same shape as \texttt{a},
 that marks which values of \texttt{a > const}. Other operators (e.g. \textless, !=, \%) work as well.\\
 \texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of
 elements from \texttt{a} that correspond to elements of \texttt{sel}
-that are {\it True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
-\texttt{a.write\_hdf5({\it filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file {\it filename.h5}.\\
+that are \textit{True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
+\texttt{a.write\_hdf5(\textit{filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file \textit{filename.h5}.\\
 
 \subsection{IPython Tips}
 \settowidth{\MyLen}{\texttt{multicol} }
 These tips work if IPython has been loaded, typically either by invoking
 \texttt{iyt} or \texttt{yt load} on the command line, or using the IPython notebook (\texttt{yt notebook}).
 \texttt{Tab complete} \textemdash\ IPython will attempt to auto-complete a
-variable or function name when the \texttt{Tab} key is pressed, e.g. {\it HaloFi}\textendash\texttt{Tab} would auto-complete
-to {\it HaloFinder}. This also works with imports, e.g. {\it from numpy.random.}\textendash\texttt{Tab}
+variable or function name when the \texttt{Tab} key is pressed, e.g. \textit{HaloFi}\textendash\texttt{Tab} would auto-complete
+to \textit{HaloFinder}. This also works with imports, e.g. \textit{from numpy.random.}\textendash\texttt{Tab}
 would give you a list of random functions (note the trailing period before hitting \texttt{Tab}).\\
 \texttt{?, ??} \textemdash\ Appending one or two question marks at the end of any object gives you
-detailed information about it, e.g. {\it variable\_name}?.\\
+detailed information about it, e.g. \textit{variable\_name}?.\\
 Below a few IPython ``magics'' are listed, which are IPython-specific shortcut commands.\\
 \texttt{\%paste} \textemdash\ Paste content from the system clipboard into the IPython shell.\\
 \texttt{\%hist} \textemdash\ Print recent command history.\\
@@ -204,40 +204,40 @@
 
 \subsection{Load and Access Data}
 The first step in using yt is to reference a simulation snapshot.
-After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
+After that, simulation data is generally accessed in yt using \textit{Data Containers} which are Python objects
 that define a region of simulation space from which data should be selected.
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{ds = yt.load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{ds = yt.load(}\textit{dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
 \texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
-\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Copies the contents of {\it field} into the
+\texttt{a = dd[}\textit{field\_name}\texttt{]} \textemdash\ Copies the contents of \textit{field} into the
 YTArray \texttt{a}. Similarly for other data containers.\\
 \texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
 \texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
 in the snapshot. \\
 \texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
-container. {\it cen} may be a coordinate, or ``max'' which 
-centers on the max density point. {\it radius} may be a float in 
-code units or a tuple of ({\it length, unit}).\\
+\texttt{sp = ds.sphere(}\textit{cen}\texttt{,}\textit{radius}\texttt{)} \textemdash\   Create a spherical data
+container. \textit{cen} may be a coordinate, or ``max'' which
+centers on the max density point. \textit{radius} may be a float in
+code units or a tuple of (\textit{length, unit}).\\
 
-\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
-rectilinear data container. {\it cen} is required but not used.
-{\it left} and {\it right edge} are coordinate values that define the region.
+\texttt{re = ds.region(\textit{cen}, \textit{left edge}, \textit{right edge})} \textemdash\ Create a
+rectilinear data container. \textit{cen} is required but not used.
+\textit{left} and \textit{right edge} are coordinate values that define the region.
 
-\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
-Create a cylindrical data container centered at {\it cen} along the 
-direction set by {\it normal},with total length
- 2$\times${\it height} and with radius {\it radius}. \\
- 
-\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{di = ds.disk(\textit{cen}, \textit{normal}, \textit{radius}, \textit{height})} \textemdash\
+Create a cylindrical data container centered at \textit{cen} along the
+direction set by \textit{normal},with total length
+ 2$\times$\textit{height} and with radius \textit{radius}. \\
+
+\texttt{ds.save\_object(sp, \textit{``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object(\textit{``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
 
 
 \subsection{Defining New Fields}
-\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory. 
+\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory.
 Field can either be created before a dataset is loaded using \texttt{add\_field}:
-\texttt{def \_metal\_mass({\it field},{\it data})}\\
+\texttt{def \_metal\_mass(\textit{field},\textit{data})}\\
 \texttt{\hspace{4 mm} return data["metallicity"]*data["cell\_mass"]}\\
 \texttt{add\_field("metal\_mass", units='g', function=\_metal\_mass)}\\
 Or added to an existing dataset using \texttt{ds.add\_field}:
@@ -245,34 +245,34 @@
 
 \subsection{Slices and Projections}
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = yt.SlicePlot(ds, {\it axis or normal vector}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
-perpendicular to {\it axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
-{\it width} in code units or a (value, unit) tuple. Hint: try {\it yt.SlicePlot?} in IPython to see additional parameters.\\
-\texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
+\texttt{slc = yt.SlicePlot(ds, \textit{axis or normal vector}, \textit{field}, \textit{center=}, \textit{width=}, \textit{weight\_field=}, \textit{additional parameters})} \textemdash\ Make a slice plot
+perpendicular to \textit{axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of \textit{field} weighted by \textit{weight\_field} at (code-units) \textit{center} with
+\textit{width} in code units or a (value, unit) tuple. Hint: try \textit{yt.SlicePlot?} in IPython to see additional parameters.\\
+\texttt{slc.save(\textit{file\_prefix})} \textemdash\ Save the slice to a png with name prefix \textit{file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = yt.ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = yt.OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = yt.ProjectionPlot(ds, \textit{axis}, \textit{field}, \textit{addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = yt.OffAxisProjectionPlot(ds, \textit{normal}, \textit{fields}, \textit{center=}, \textit{width=}, \textit{depth=},\textit{north\_vector=},\textit{weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}
 \settowidth{\MyLen}{\texttt{multicol} }
-Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and then called like \texttt{ prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a {\it plot\_args} dict that is fed to matplotlib annotator. \\
-\texttt{velocity({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
-\texttt{magnetic\_field({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
-\texttt{quiver({\it field\_x},{\it field\_y},{\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \\
-\texttt{contour({\it field=},{\it ncont=},{\it factor=},{\it clim=},{\it take\_log=}, {\it additional parameters})} \textemdash Plots a number of contours {\it ncont} to interpolate {\it field} optionally using {\it take\_log}, upper and lower {\it c}ontour{\it lim}its and {\it factor} number of points in the interpolation.\\
-\texttt{grids({\it alpha=}, {\it draw\_ids=}, {\it periodic=}, {\it min\_level=}, {\it max\_level=})} \textemdash Add grid boundaries. \\
-\texttt{streamlines({\it field\_x},{\it field\_y},{\it factor=},{\it density=})}\\
-\texttt{clumps({\it clumplist})} \textemdash\ Generate {\it clumplist} using the clump finder and plot. \\
-\texttt{arrow({\it pos}, {\it code\_size})} Add an arrow at a {\it pos}ition. \\
-\texttt{point({\it pos}, {\it text})} \textemdash\ Add text at a {\it pos}ition. \\
-\texttt{marker({\it pos}, {\it marker=})} \textemdash\ Add a matplotlib-defined marker at a {\it pos}ition. \\
-\texttt{sphere({\it center}, {\it radius}, {\it text=})} \textemdash\ Draw a circle and append {\it text}.\\
-\texttt{hop\_circles({\it hop\_output}, {\it max\_number=}, {\it annotate=}, {\it min\_size=}, {\it max\_size=}, {\it font\_size=}, {\it print\_halo\_size=}, {\it fixed\_radius=}, {\it min\_mass=}, {\it print\_halo\_mass=}, {\it width=})} \textemdash\ Draw a halo, printing it's ID, mass, clipping halos depending on number of particles ({\it size}) and optionally fixing the drawn circle radius to be constant for all halos.\\
-\texttt{hop\_particles({\it hop\_output},{\it max\_number=},{\it p\_size=},\\
-{\it min\_size},{\it alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
-\texttt{particles({\it width},{\it p\_size=},{\it col=}, {\it marker=}, {\it stride=}, {\it ptype=}, {\it stars\_only=}, {\it dm\_only=}, {\it minimum\_mass=}, {\it alpha=})}  \textemdash\  Draw particles of {\it p\_size} pixels in a slab of {\it width} with {\it col}or using a matplotlib {\it marker} plotting only every {\it stride} number of particles.\\
-\texttt{title({\it text})}\\
+Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and then called like \texttt{ prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a \textit{plot\_args} dict that is fed to matplotlib annotator. \\
+\texttt{velocity(\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
+\texttt{magnetic\_field(\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
+\texttt{quiver(\textit{field\_x},\textit{field\_y},\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \\
+\texttt{contour(\textit{field=},\textit{ncont=},\textit{factor=},\textit{clim=},\textit{take\_log=}, \textit{additional parameters})} \textemdash Plots a number of contours \textit{ncont} to interpolate \textit{field} optionally using \textit{take\_log}, upper and lower \textit{c}ontour\textit{lim}its and \textit{factor} number of points in the interpolation.\\
+\texttt{grids(\textit{alpha=}, \textit{draw\_ids=}, \textit{periodic=}, \textit{min\_level=}, \textit{max\_level=})} \textemdash Add grid boundaries. \\
+\texttt{streamlines(\textit{field\_x},\textit{field\_y},\textit{factor=},\textit{density=})}\\
+\texttt{clumps(\textit{clumplist})} \textemdash\ Generate \textit{clumplist} using the clump finder and plot. \\
+\texttt{arrow(\textit{pos}, \textit{code\_size})} Add an arrow at a \textit{pos}ition. \\
+\texttt{point(\textit{pos}, \textit{text})} \textemdash\ Add text at a \textit{pos}ition. \\
+\texttt{marker(\textit{pos}, \textit{marker=})} \textemdash\ Add a matplotlib-defined marker at a \textit{pos}ition. \\
+\texttt{sphere(\textit{center}, \textit{radius}, \textit{text=})} \textemdash\ Draw a circle and append \textit{text}.\\
+\texttt{hop\_circles(\textit{hop\_output}, \textit{max\_number=}, \textit{annotate=}, \textit{min\_size=}, \textit{max\_size=}, \textit{font\_size=}, \textit{print\_halo\_size=}, \textit{fixed\_radius=}, \textit{min\_mass=}, \textit{print\_halo\_mass=}, \textit{width=})} \textemdash\ Draw a halo, printing it's ID, mass, clipping halos depending on number of particles (\textit{size}) and optionally fixing the drawn circle radius to be constant for all halos.\\
+\texttt{hop\_particles(\textit{hop\_output},\textit{max\_number=},\textit{p\_size=},\\
+\textit{min\_size},\textit{alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
+\texttt{particles(\textit{width},\textit{p\_size=},\textit{col=}, \textit{marker=}, \textit{stride=}, \textit{ptype=}, \textit{stars\_only=}, \textit{dm\_only=}, \textit{minimum\_mass=}, \textit{alpha=})}  \textemdash\  Draw particles of \textit{p\_size} pixels in a slab of \textit{width} with \textit{col}or using a matplotlib \textit{marker} plotting only every \textit{stride} number of particles.\\
+\texttt{title(\textit{text})}\\
 
 \subsection{The $\sim$/.yt/ Directory}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -297,12 +297,12 @@
 
 
 \subsection{Parallel Analysis}
-\settowidth{\MyLen}{\texttt{multicol}} 
+\settowidth{\MyLen}{\texttt{multicol}}
 Nearly all of yt is parallelized using
-MPI.  The {\it mpi4py} package must be installed for parallelism in yt.  To
-install {\it pip install mpi4py} on the command line usually works.
+MPI\@.  The \textit{mpi4py} package must be installed for parallelism in yt.  To
+install \textit{pip install mpi4py} on the command line usually works.
 Execute python in parallel similar to this:\\
-{\it mpirun -n 12 python script.py}\\
+\textit{mpirun -n 12 python script.py}\\
 The file \texttt{script.py} must call the \texttt{yt.enable\_parallelism()} to
 turn on yt's parallelism.  If this doesn't happen, all cores will execute the
 same serial yt script.  This command may differ for each system on which you use
@@ -320,12 +320,12 @@
 \texttt{hg clone https://bitbucket.org/yt\_analysis/yt} \textemdash\ Clone a copy of yt. \\
 \texttt{hg status} \textemdash\ Files changed in working directory.\\
 \texttt{hg diff} \textemdash\ Print diff of all changed files in working directory. \\
-\texttt{hg diff -r{\it RevX} -r{\it RevY}} \textemdash\ Print diff of all changes between revision {\it RevX} and {\it RevY}.\\
+\texttt{hg diff -r\textit{RevX} -r\textit{RevY}} \textemdash\ Print diff of all changes between revision \textit{RevX} and \textit{RevY}.\\
 \texttt{hg log} \textemdash\ History of changes.\\
-\texttt{hg cat -r{\it RevX file}} \textemdash\ Print the contents of {\it file} from revision {\it RevX}.\\
+\texttt{hg cat -r\textit{RevX file}} \textemdash\ Print the contents of \textit{file} from revision \textit{RevX}.\\
 \texttt{hg heads} \textemdash\ Print all the current heads. \\
-\texttt{hg revert -r{\it RevX file}} \textemdash\ Revert {\it file} to revision {\it RevX}. On-disk changed version is
-moved to {\it file.orig}. \\
+\texttt{hg revert -r\textit{RevX file}} \textemdash\ Revert \textit{file} to revision \textit{RevX}. On-disk changed version is
+moved to \textit{file.orig}. \\
 \texttt{hg commit} \textemdash\ Commit changes to repository. \\
 \texttt{hg push} \textemdash\ Push changes to default remote repository. \\
 \texttt{hg pull} \textemdash\ Pull changes from default remote repository. \\

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -17,7 +17,7 @@
 #
 # By default this will install yt from source.
 #
-# If you experience problems, please visit the Help section at 
+# If you experience problems, please visit the Help section at
 # http://yt-project.org.
 #
 DEST_SUFFIX="yt-conda"
@@ -298,7 +298,7 @@
 
 if [ $INST_UNSTRUCTURED -eq 1 ]
 then
-  YT_DEPS+=('netcdf4')   
+  YT_DEPS+=('netcdf4')
 fi
 
 # Here is our dependency list for yt
@@ -361,7 +361,7 @@
 echo "yt and the Conda system are now installed in $DEST_DIR ."
 echo
 echo "You must now modify your PATH variable by prepending:"
-echo 
+echo
 echo "   $DEST_DIR/bin"
 echo
 echo "On Bash-style shells you can copy/paste the following command to "

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/helper_scripts/code_support.py
--- a/doc/helper_scripts/code_support.py
+++ b/doc/helper_scripts/code_support.py
@@ -85,7 +85,7 @@
 print("|| . ||", end=' ')
 for c in code_names:
     print("%s || " % (c), end=' ')
-print() 
+print()
 
 for vn in vals:
     print("|| !%s ||" % (vn), end=' ')

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -19,7 +19,7 @@
 CWD = os.getcwd()
 ytcfg["yt", "serialize"] = "False"
 PARALLEL_TEST = {"rockstar_nest": "3"}
-BLACKLIST = []
+BLACKLIST = ["opengl_ipython", "opengl_vr"]
 
 
 def prep_dirs():

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/helper_scripts/table.py
--- a/doc/helper_scripts/table.py
+++ b/doc/helper_scripts/table.py
@@ -44,7 +44,7 @@
       "A bunch of illustrated examples of how to do things"),
      ("reference/index.html", "Reference Materials",
       "A list of all bundled fields, API documentation, the Change Log..."),
-     ("faq/index.html", "FAQ", 
+     ("faq/index.html", "FAQ",
       "Frequently Asked Questions: answered for you!")
   ]),
 ]

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/helper_scripts/update_recipes.py
--- a/doc/helper_scripts/update_recipes.py
+++ b/doc/helper_scripts/update_recipes.py
@@ -66,7 +66,7 @@
             written = cond_output(output, written)
             ofn = "%s/%s_%s" % (ndir, fn, os.path.basename(ifn))
             open(ofn, "wb").write(open(ifn, "rb").read())
-            output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) + 
+            output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) +
                          "   :width: 240\n" +
                          "   :target: ../_images/%s_%s\n" % (fn, os.path.basename(ifn))
                         )

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,13 +1,13 @@
 #
 # Hi there!  Welcome to the yt installation script.
 #
-# First things first, if you experience problems, please visit the Help 
+# First things first, if you experience problems, please visit the Help
 # section at http://yt-project.org.
 #
 # This script is designed to create a fully isolated Python installation
 # with the dependencies you need to run yt.
 #
-# There are a few options, but you only need to set *one* of them, which is 
+# There are a few options, but you only need to set *one* of them, which is
 # the next one, DEST_DIR:
 
 DEST_SUFFIX="yt-`uname -m`"
@@ -307,7 +307,7 @@
         echo "  * gcc-{,c++,gfortran}"
         echo "  * make"
         echo "  * patch"
-        echo 
+        echo
         echo "You can accomplish this by executing:"
         echo "$ sudo yum install gcc gcc-c++ gcc-gfortran make patch zip"
         echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
@@ -495,7 +495,7 @@
 if [ $INST_PY3 -eq 1 ]
 then
      PYTHON_EXEC='python3.4'
-else 
+else
      PYTHON_EXEC='python2.7'
 fi
 
@@ -513,7 +513,7 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
-    if [[ $LIB =~ .*mercurial.* ]] 
+    if [[ $LIB =~ .*mercurial.* ]]
     then
         PYEXE="python2.7"
     else
@@ -620,9 +620,9 @@
 CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
 BZLIB='bzip2-1.0.6'
-FREETYPE_VER='freetype-2.4.12' 
+FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.5.0'
-HDF5='hdf5-1.8.14' 
+HDF5='hdf5-1.8.14'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.4.3'
@@ -880,7 +880,7 @@
 
 # This fixes problems with gfortran linking.
 unset LDFLAGS
- 
+
 echo "Installing pip"
 ( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
 ( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -1006,7 +1006,7 @@
 cd $MY_PWD
 
 if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
-    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
+    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]]
 then
     if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -40,7 +40,7 @@
         padding-bottom: 10px;
     }
     /* since 3.1.0 */
-    .navbar-collapse.collapse.in { 
+    .navbar-collapse.collapse.in {
         display: block!important;
     }
     .collapsing {
@@ -48,7 +48,7 @@
     }
 }
 
-/* 
+/*
 
 Sphinx code literals conflict with the notebook code tag, so we special-case
 literals that are inside text.
@@ -56,7 +56,7 @@
 */
 
 p code {
-    color:  #d14;    
+    color:  #d14;
     white-space: nowrap;
     font-size: 90%;
     background-color: #f9f2f4;
@@ -93,16 +93,16 @@
 */
 
 *[id]:before :not(p) {
-  display: block; 
-  content: " "; 
-  margin-top: -45px; 
-  height: 45px; 
-  visibility: hidden; 
+  display: block;
+  content: " ";
+  margin-top: -45px;
+  height: 45px;
+  visibility: hidden;
 }
 
 /*
 
-Make tables span only half the page. 
+Make tables span only half the page.
 
 */
 

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/source/about/index.rst
--- a/doc/source/about/index.rst
+++ b/doc/source/about/index.rst
@@ -12,10 +12,10 @@
 -----------
 
 yt is a toolkit for analyzing and visualizing quantitative data.  Originally
-written to analyze 3D grid-based astrophysical simulation data, 
+written to analyze 3D grid-based astrophysical simulation data,
 it has grown to handle any kind of data represented in a 2D or 3D volume.
-yt is an Python-based open source project and is open for anyone to use or 
-contribute code.  The entire source code and history is available to all 
+yt is an Python-based open source project and is open for anyone to use or
+contribute code.  The entire source code and history is available to all
 at https://bitbucket.org/yt_analysis/yt .
 
 .. _who-is-yt:
@@ -23,16 +23,16 @@
 Who is yt?
 ----------
 
-As an open-source project, yt has a large number of user-developers.  
-In September of 2014, the yt developer community collectively decided to endow 
-the title of *member* on individuals who had contributed in a significant way 
-to the project.  For a list of those members and a description of their 
-contributions to the code, see 
+As an open-source project, yt has a large number of user-developers.
+In September of 2014, the yt developer community collectively decided to endow
+the title of *member* on individuals who had contributed in a significant way
+to the project.  For a list of those members and a description of their
+contributions to the code, see
 `our members website. <http://yt-project.org/members.html>`_
 
-For an up-to-date list of everyone who has contributed to the yt codebase, 
-see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.  
-For a more detailed breakup of contributions made by individual users, see out 
+For an up-to-date list of everyone who has contributed to the yt codebase,
+see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.
+For a more detailed breakup of contributions made by individual users, see our
 `Open HUB page <https://www.openhub.net/p/yt_amr/contributors?query=&sort=commits>`_.
 
 History of yt
@@ -40,17 +40,17 @@
 
 yt was originally begun by Matthew Turk in 2007 in the course of his graduate
 studies in computational astrophysics.  The code was developed
-as a simple data-reader and exporter for grid-based hydrodynamical simulation 
-data outputs from the *Enzo* code.  Over the next few years, he invited 
+as a simple data-reader and exporter for grid-based hydrodynamical simulation
+data outputs from the *Enzo* code.  Over the next few years, he invited
 collaborators and friends to contribute and use yt.  As the community grew,
-so did the capabilities of yt.  It is now a community-developed project with 
-contributions from many people, the hospitality of several institutions, and 
-benefiting from numerous grants.  With this community-driven approach 
-and contributions from a sizeable population of developers, it has evolved 
-into a fully-featured toolkit for analysis and visualization of 
-multidimensional data.  It relies on no proprietary software -- although it 
-can be and has been extended to interface with proprietary software and 
-libraries -- and has been designed from the ground up to enable users to be 
+so did the capabilities of yt.  It is now a community-developed project with
+contributions from many people, the hospitality of several institutions, and
+benefiting from numerous grants.  With this community-driven approach
+and contributions from a sizeable population of developers, it has evolved
+into a fully-featured toolkit for analysis and visualization of
+multidimensional data.  It relies on no proprietary software -- although it
+can be and has been extended to interface with proprietary software and
+libraries -- and has been designed from the ground up to enable users to be
 as immersed in the data as they desire.
 
 How do I contact yt?
@@ -58,7 +58,7 @@
 
 If you have any questions about the code, please contact the `yt users email
 list <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_.  If
-you're having other problems, please follow the steps in 
+you're having other problems, please follow the steps in
 :ref:`asking-for-help`.
 
 How do I cite yt?
@@ -70,7 +70,7 @@
 entry: ::
 
    @ARTICLE{2011ApJS..192....9T,
-      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and 
+      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
    	{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
        title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
      journal = {\apjs},

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -1,76 +1,119 @@
 .. _absorption_spectrum:
 
-Absorption Spectrum
-===================
+Creating Absorption Spectra
+===========================
 
 .. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Absorption line spectra, such as shown below, can be made with data created 
-by the (:ref:`light-ray-generator`).  For each element of the ray, column 
-densities are calculated multiplying the number density within a grid cell 
-with the path length of the ray through the cell.  Line profiles are 
-generated using a voigt profile based on the temperature field.  The lines 
-are then shifted according to the redshift recorded by the light ray tool 
-and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the 
-peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to 
-:meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
+Absorption line spectra are spectra generated using bright background sources
+to illuminate tenuous foreground material and are primarily used in studies
+of the circumgalactic medium and intergalactic medium.  These spectra can
+be created using the
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+and
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+analysis modules.
 
-The spectrum generator will output a file containing the wavelength and 
-normalized flux.  It will also output a text file listing all important lines.
+The 
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` class
+and its workhorse method
+:meth:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum.make_spectrum`
+return two arrays, one with wavelengths, the other with the normalized
+flux values at each of the wavelength values.  It can also output a text file
+listing all important lines.
+
+For example, here is an absorption spectrum for the wavelength range from 900 
+to 1800 Angstroms made with a light ray extending from z = 0 to z = 0.4:
 
 .. image:: _images/spectrum_full.png
    :width: 500
 
-An absorption spectrum for the wavelength range from 900 to 1800 Angstroms 
-made with a light ray extending from z = 0 to z = 0.4.
+And a zoom-in on the 1425-1450 Angstrom window:
 
 .. image:: _images/spectrum_zoom.png
    :width: 500
 
-A zoom-in of the above spectrum.
+Method for Creating Absorption Spectra
+--------------------------------------
 
-Creating an Absorption Spectrum
--------------------------------
+Once a
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+has been created traversing a dataset using the :ref:`light-ray-generator`,
+a series of arrays store the various fields of the gas parcels (represented
+as cells) intersected along the ray.
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+steps through each element of the
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`'s
+arrays and calculates the column density for desired ion by multiplying its
+number density with the path length through the cell.  Using these column
+densities along with temperatures to calculate thermal broadening, voigt
+profiles are deposited on to a featureless background spectrum.  By default,
+the peculiar velocity of the gas is included as a doppler redshift in addition
+to any cosmological redshift of the data dump itself.
 
-To instantiate an AbsorptionSpectrum object, the arguments required are the 
-minimum and maximum wavelengths, and the number of wavelength bins.
+Subgrid Deposition
+^^^^^^^^^^^^^^^^^^
+
+For features not resolved (i.e. possessing narrower width than the spectral
+resolution),
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+performs subgrid deposition.  The subgrid deposition algorithm creates a number
+of smaller virtual bins, by default the width of the virtual bins is 1/10th
+the width of the spectral feature.  The Voigt profile is then deposited
+into these virtual bins where it is resolved, and then these virtual bins
+are numerically integrated back to the resolution of the original spectral bin
+size, yielding accurate equivalent widths values.
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+informs the user how many spectral features are deposited in this fashion.
+
+Tutorial on Creating an Absorption Spectrum
+-------------------------------------------
+
+Initializing `AbsorptionSpectrum` Class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To instantiate an
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+object, the arguments required are the
+minimum and maximum wavelengths (assumed to be in Angstroms), and the number
+of wavelength bins to span this range (including the endpoints)
 
 .. code-block:: python
 
   from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
-  sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
+  sp = AbsorptionSpectrum(900.0, 1800.0, 10001)
 
 Adding Features to the Spectrum
--------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Absorption lines and continuum features can then be added to the spectrum.  
-To add a line, you must know some properties of the line: the rest wavelength, 
-f-value, gamma value, and the atomic mass in amu of the atom.  That line must 
+Absorption lines and continuum features can then be added to the spectrum.
+To add a line, you must know some properties of the line: the rest wavelength,
+f-value, gamma value, and the atomic mass in amu of the atom.  That line must
 be tied in some way to a field in the dataset you are loading, and this field
-must be added to the LightRay object when it is created.  Below, we will 
-add the H Lyman-alpha line, which is tied to the neutral hydrogen field 
+must be added to the LightRay object when it is created.  Below, we will
+add the H Lyman-alpha line, which is tied to the neutral hydrogen field
 ('H_number_density').
 
 .. code-block:: python
-  
+
   my_label = 'HI Lya'
   field = 'H_number_density'
   wavelength = 1215.6700 # Angstroms
   f_value = 4.164E-01
   gamma = 6.265e+08
   mass = 1.00794
-  
+
   sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
 
-In the above example, the *field* argument tells the spectrum generator which 
-field from the ray data to use to calculate the column density.  The 
-``label_threshold`` keyword tells the spectrum generator to add all lines 
-above a column density of 10 :superscript:`10` cm :superscript:`-2` to the 
-text line list.  If None is provided, as is the default, no lines of this 
-type will be added to the text list.
+In the above example, the *field* argument tells the spectrum generator which
+field from the ray data to use to calculate the column density.  The
+``label_threshold`` keyword tells the spectrum generator to add all lines
+above a column density of 10 :superscript:`10` cm :superscript:`-2` to the
+text line list output at the end.  If None is provided, as is the default,
+no lines of this type will be added to the text list.
 
-Continuum features with optical depths that follow a power law can also be 
+Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
 Below, we will add H Lyman continuum.
@@ -82,29 +125,29 @@
   wavelength = 912.323660 # Angstroms
   normalization = 1.6e17
   index = 3.0
-  
+
   sp.add_continuum(my_label, field, wavelength, normalization, index)
 
 Making the Spectrum
--------------------
+^^^^^^^^^^^^^^^^^^^
 
-Once all the lines and continuum are added, it is time to make a spectrum out 
+Once all the lines and continuum are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python
 
-  wavelength, flux = sp.make_spectrum('lightray.h5', 
-                                      output_file='spectrum.fits', 
-                                      line_list_file='lines.txt',
-                                      use_peculiar_velocity=True)
+  wavelength, flux = sp.make_spectrum('lightray.h5',
+                                      output_file='spectrum.fits',
+                                      line_list_file='lines.txt')
 
-A spectrum will be made using the specified ray data and the wavelength and 
-flux arrays will also be returned.  If ``use_peculiar_velocity`` is set to 
-False, the lines will only be shifted according to the redshift.
+A spectrum will be made using the specified ray data and the wavelength and
+flux arrays will also be returned.  If you set the optional
+``use_peculiar_velocity`` keyword to False, the lines will not incorporate
+doppler redshifts to shift the deposition of the line features.
 
-Three output file formats are supported for writing out the spectrum: fits, 
-hdf5, and ascii.  The file format used is based on the extension provided 
-in the ``output_file`` keyword: ``.fits`` for a fits file, 
+Three output file formats are supported for writing out the spectrum: fits,
+hdf5, and ascii.  The file format used is based on the extension provided
+in the ``output_file`` keyword: ``.fits`` for a fits file,
 ``.h5`` for an hdf5 file, and anything else for an ascii file.
 
 .. note:: To write out a fits file, you must install the `astropy <http://www.astropy.org>`_ python library in order to access the astropy.io.fits module.  You can usually do this by simply running `pip install astropy` at the command line.
@@ -112,29 +155,30 @@
 Generating Spectra in Parallel
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The spectrum generator can be run in parallel simply by following the procedures 
-laid out in :ref:`parallel-computation` for running yt scripts in parallel.  
-Spectrum generation is parallelized using a multi-level strategy where each 
-absorption line is deposited by a different processor.  If the number of available 
-processors is greater than the number of lines, then the deposition of 
-individual lines will be divided over multiple processors.
+The `AbsorptionSpectrum` analysis module can be run in parallel simply by
+following the procedures laid out in :ref:`parallel-computation` for running
+yt scripts in parallel.  Spectrum generation is parallelized using a multi-level
+strategy where each absorption line is deposited by a different processor.
+If the number of available processors is greater than the number of lines,
+then the deposition of individual lines will be divided over multiple
+processors.
 
-Fitting an Absorption Spectrum
-------------------------------
+Fitting Absorption Spectra
+==========================
 
 .. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
 
 This tool can be used to fit absorption spectra, particularly those
 generated using the (``AbsorptionSpectrum``) tool. For more details
 on its uses and implementation please see (`Egan et al. (2013)
-<http://arxiv.org/abs/1307.2244>`_). If you find this tool useful we 
+<http://arxiv.org/abs/1307.2244>`_). If you find this tool useful we
 encourage you to cite accordingly.
 
 Loading an Absorption Spectrum
 ------------------------------
 
-To load an absorption spectrum created by 
-(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum``), 
+To load an absorption spectrum created by
+(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`),
 we specify the output file name. It is advisable to use either an .h5
 or .fits file, rather than an ascii file to save the spectrum as rounding
 errors produced in saving to a ascii file will negatively impact fit quality.
@@ -149,7 +193,7 @@
 Specifying Species Properties
 -----------------------------
 
-Before fitting a spectrum, you must specify the properties of all the 
+Before fitting a spectrum, you must specify the properties of all the
 species included when generating the spectrum.
 
 The physical properties needed for each species are the rest wavelength,
@@ -160,7 +204,7 @@
 
 To fine tune the fitting procedure and give results in a minimal
 number of optimizing steps, we specify expected maximum and minimum
-values for the column density, doppler parameter, and redshift. These 
+values for the column density, doppler parameter, and redshift. These
 values can be well outside the range of expected values for a typical line
 and are mostly to prevent the algorithm from fitting to negative values
 or becoming numerically unstable.
@@ -204,7 +248,7 @@
 --------------------------
 
 After loading a spectrum and specifying the properties of the species
-used to generate the spectrum, an appropriate fit can be generated. 
+used to generate the spectrum, an appropriate fit can be generated.
 
 .. code-block:: python
 
@@ -219,19 +263,19 @@
 recommended to fit species the generate multiple lines first, as a fit
 will only be accepted if all of the lines are fit appropriately using
 a single set of parameters. At the moment no cross correlation between
-lines of different species is performed. 
+lines of different species is performed.
 
-The parameters of the lines that are needed to fit the spectrum are contained 
+The parameters of the lines that are needed to fit the spectrum are contained
 in the ``fitted_lines`` variable. Each species given in ``orderFits`` will
-be a key in the ``fitted_lines`` dictionary. The entry for each species 
-key will be another dictionary containing entries for 'N','b','z', and 
+be a key in the ``fitted_lines`` dictionary. The entry for each species
+key will be another dictionary containing entries for 'N','b','z', and
 'group#' which are the column density, doppler parameter, redshift,
-and associate line complex respectively. The i :superscript:`th` line 
-of a given species is then given by the parameters ``N[i]``, ``b[i]``, 
+and associate line complex respectively. The i :superscript:`th` line
+of a given species is then given by the parameters ``N[i]``, ``b[i]``,
 and ``z[i]`` and is part of the same complex (and was fitted at the same time)
 as all lines with the same group number as ``group#[i]``.
 
-The ``fitted_flux`` is an ndarray of the same size as ``flux`` and 
+The ``fitted_flux`` is an ndarray of the same size as ``flux`` and
 ``wavelength`` that contains the cumulative absorption spectrum generated
 by the lines contained in ``fitted_lines``.
 
@@ -250,8 +294,8 @@
 
 .. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
 
-To generate a fit for a spectrum 
-:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit` 
+To generate a fit for a spectrum
+:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit`
 is called.
 This function controls the identification of line complexes, the fit
 of a series of absorption lines for each appropriate species, checks of
@@ -260,14 +304,14 @@
 Finding Line Complexes
 ----------------------
 
-Line complexes are found using the 
+Line complexes are found using the
 :func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.find_complexes`
-function. The process by which line complexes are found involves walking 
-through the array of flux in order from minimum to maximum wavelength, and 
-finding series of spatially contiguous cells whose flux is less than some 
-limit.  These regions are then checked in terms of an additional flux limit 
-and size.  The bounds of all the passing regions are then listed and returned. 
-Those bounds that cover an exceptionally large region of wavelength space will 
+function. The process by which line complexes are found involves walking
+through the array of flux in order from minimum to maximum wavelength, and
+finding series of spatially contiguous cells whose flux is less than some
+limit.  These regions are then checked in terms of an additional flux limit
+and size.  The bounds of all the passing regions are then listed and returned.
+Those bounds that cover an exceptionally large region of wavelength space will
 be broken up if a suitable cut point is found. This method is only appropriate
 for noiseless spectra.
 
@@ -280,25 +324,25 @@
 unstable when optimizing.
 
 The ``fitLim`` parameter controls what is the maximum flux that the trough
-of the region can have and still be considered a line complex. This 
+of the region can have and still be considered a line complex. This
 effectively controls the sensitivity to very low column absorbers. Default
-value is ``fitLim`` = 0.99. If a region is identified where the flux of the 
+value is ``fitLim`` = 0.99. If a region is identified where the flux of the
 trough is greater than this value, the region is simply ignored.
 
-The ``minLength`` parameter controls the minimum number of array elements 
+The ``minLength`` parameter controls the minimum number of array elements
 that an identified region must have. This value must be greater than or
 equal to 3 as there are a minimum of 3 free parameters that must be fit.
 Default is ``minLength`` = 3.
 
 The ``maxLength`` parameter controls the maximum number of array elements
 that an identified region can have before it is split into separate regions.
-Default is ``maxLength`` = 1000. This should be adjusted based on the 
+Default is ``maxLength`` = 1000. This should be adjusted based on the
 resolution of the spectrum to remain appropriate. The value correspond
-to a wavelength of roughly 50 angstroms. 
+to a wavelength of roughly 50 angstroms.
 
 The ``splitLim`` parameter controls how exceptionally large regions are split.
 When such a region is identified by having more array elements than
-``maxLength``, the point of maximum flux (or minimum absorption) in the 
+``maxLength``, the point of maximum flux (or minimum absorption) in the
 middle two quartiles is identified. If that point has a flux greater than
 or equal to ``splitLim``, then two separate complexes are created: one from
 the lower wavelength edge to the minimum absorption point and the other from
@@ -309,7 +353,7 @@
 Fitting a Line Complex
 ----------------------
 
-After a complex is identified, it is fitted by iteratively adding and 
+After a complex is identified, it is fitted by iteratively adding and
 optimizing a set of Voigt Profiles for a particular species until the
 region is considered successfully fit. The optimizing is accomplished
 using scipy's least squares optimizer. This requires an initial estimate
@@ -326,36 +370,36 @@
 smaller initial guess is given. These values are chosen to make optimization
 faster and more stable by being closer to the actual value, but the final
 results of fitting should not depend on them as they merely provide a
-starting point. 
+starting point.
 
-After the parameters for a line are optimized for the first time, the 
-optimized parameters are then used for the initial guess on subsequent 
-iterations with more lines. 
+After the parameters for a line are optimized for the first time, the
+optimized parameters are then used for the initial guess on subsequent
+iterations with more lines.
 
-The complex is considered successfully fit when the sum of the squares of 
+The complex is considered successfully fit when the sum of the squares of
 the difference between the flux generated from the fit and the desired flux
 profile is less than ``errBound``. ``errBound`` is related to the optional
-parameter to 
+parameter to
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.generate_total_fit`,
-``maxAvgError`` by the number of array elements in the region such that 
+``maxAvgError`` by the number of array elements in the region such that
 ``errBound`` = number of elements * ``maxAvgError``.
 
-There are several other conditions under which the cycle of adding and 
+There are several other conditions under which the cycle of adding and
 optimizing lines will halt. If the error of the optimized fit from adding
 a line is an order of magnitude worse than the error of the fit without
-that line, then it is assumed that the fitting has become unstable and 
+that line, then it is assumed that the fitting has become unstable and
 the latest line is removed. Lines are also prevented from being added if
 the total number of lines is greater than the number of elements in the flux
 array being fit divided by 3. This is because there must not be more free
-parameters in a fit than the number of points to constrain them. 
+parameters in a fit than the number of points to constrain them.
 
 Checking Fit Results
 --------------------
 
 After an acceptable fit for a region is determined, there are several steps
-the algorithm must go through to validate the fits. 
+the algorithm must go through to validate the fits.
 
-First, the parameters must be in a reasonable range. This is a check to make 
+First, the parameters must be in a reasonable range. This is a check to make
 sure that the optimization did not become unstable and generate a fit that
 diverges wildly outside the region where the fit was performed. This way, even
 if particular complex cannot be fit, the rest of the spectrum fitting still
@@ -363,13 +407,13 @@
 in the species parameter dictionary. These are merely broad limits that will
 prevent numerical instability rather than physical limits.
 
-In cases where a single species generates multiple lines (as in the OVI 
+In cases where a single species generates multiple lines (as in the OVI
 doublet), the fits are then checked for higher wavelength lines. Originally
 the fits are generated only considering the lowest wavelength fit to a region.
 This is because we perform the fitting of complexes in order from the lowest
 wavelength to the highest, so any contribution to a complex being fit must
 come from the lower wavelength as the higher wavelength contributions would
-already have been subtracted out after fitting the lower wavelength. 
+already have been subtracted out after fitting the lower wavelength.
 
 Saturated Lyman Alpha Fitting Tools
 -----------------------------------
@@ -380,8 +424,8 @@
 The basic approach is to simply try a much wider range of initial parameter
 guesses in order to find the true optimization minimum, rather than getting
 stuck in a local minimum. A set of hard coded initial parameter guesses
-for Lyman alpha lines is given by the function 
+for Lyman alpha lines is given by the function
 :func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.get_test_lines`.
 Also included in these parameter guesses is an an initial guess of a high
-column cool line overlapping a lower column warm line, indictive of a 
+column cool line overlapping a lower column warm line, indicative of a
 broad Lyman alpha (BLA) absorber.

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -3,17 +3,17 @@
 Clump Finding
 =============
 
-The clump finder uses a contouring algorithm to identified topologically 
-disconnected structures within a dataset.  This works by first creating a 
-single contour over the full range of the contouring field, then continually 
-increasing the lower value of the contour until it reaches the maximum value 
-of the field.  As disconnected structures are identified as separate contours, 
-the routine continues recursively through each object, creating a hierarchy of 
-clumps.  Individual clumps can be kept or removed from the hierarchy based on 
-the result of user-specified functions, such as checking for gravitational 
+The clump finder uses a contouring algorithm to identify topologically
+disconnected structures within a dataset.  This works by first creating a
+single contour over the full range of the contouring field, then continually
+increasing the lower value of the contour until it reaches the maximum value
+of the field.  As disconnected structures are identified as separate contours,
+the routine continues recursively through each object, creating a hierarchy of
+clumps.  Individual clumps can be kept or removed from the hierarchy based on
+the result of user-specified functions, such as checking for gravitational
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
-The clump finder requires a data object (see :ref:`data-objects`) and a field 
+The clump finder requires a data object (see :ref:`data-objects`) and a field
 over which the contouring is to be performed.
 
 .. code:: python
@@ -28,11 +28,11 @@
 
    master_clump = Clump(data_source, ("gas", "density"))
 
-At this point, every isolated contour will be considered a clump, 
-whether this is physical or not.  Validator functions can be added to 
-determine if an individual contour should be considered a real clump.  
-These functions are specified with the ``Clump.add_validator`` function.  
-Current, two validators exist: a minimum number of cells and gravitational 
+At this point, every isolated contour will be considered a clump,
+whether this is physical or not.  Validator functions can be added to
+determine if an individual contour should be considered a real clump.
+These functions are specified with the ``Clump.add_validator`` function.
+Currently, two validators exist: a minimum number of cells and gravitational
 boundedness.
 
 .. code:: python
@@ -41,9 +41,9 @@
 
    master_clump.add_validator("gravitationally_bound", use_particles=False)
 
-As many validators as desired can be added, and a clump is only kept if all 
-return True.  If not, a clump is remerged into its parent.  Custom validators 
-can easily be added.  A validator function must only accept a ``Clump`` object 
+As many validators as desired can be added, and a clump is only kept if all
+return True.  If not, a clump is remerged into its parent.  Custom validators
+can easily be added.  A validator function must only accept a ``Clump`` object
 and either return True or False.
 
 .. code:: python
@@ -52,16 +52,16 @@
        return (clump["gas", "cell_mass"].sum() >= min_mass)
    add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-The ``add_validator`` function adds the validator to a registry that can 
-be accessed by the clump finder.  Then, the validator can be added to the 
+The ``add_validator`` function adds the validator to a registry that can
+be accessed by the clump finder.  Then, the validator can be added to the
 clump finding just like the others.
 
 .. code:: python
 
    master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-The clump finding algorithm accepts the ``Clump`` object, the initial minimum 
-and maximum of the contouring field, and the step size.  The lower value of the 
+The clump finding algorithm accepts the ``Clump`` object, the initial minimum
+and maximum of the contouring field, and the step size.  The lower value of the
 contour finder will be continually multiplied by the step size.
 
 .. code:: python
@@ -71,9 +71,9 @@
    step = 2.0
    find_clumps(master_clump, c_min, c_max, step)
 
-After the clump finding has finished, the master clump will represent the top 
-of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object 
-contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object 
+After the clump finding has finished, the master clump will represent the top
+of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object
+contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object
 with its own ``children`` attribute, and so on.
 
 A number of helper routines exist for examining the clump hierarchy.
@@ -96,15 +96,15 @@
    print(leaf_clumps[0]["gas", "density"])
    print(leaf_clumps[0].quantities.total_mass())
 
-The writing functions will write out a series or properties about each 
-clump by default.  Additional properties can be appended with the 
+The writing functions will write out a series of properties about each
+clump by default.  Additional properties can be appended with the
 ``Clump.add_info_item`` function.
 
 .. code:: python
 
    master_clump.add_info_item("total_cells")
 
-Just like the validators, custom info items can be added by defining functions 
+Just like the validators, custom info items can be added by defining functions
 that minimally accept a ``Clump`` object and return a string to be printed.
 
 .. code:: python
@@ -121,16 +121,16 @@
 
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
-By default, the following info items are activated: **total_cells**, 
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**, 
-**max_grid_level**, **min_number_density**, **max_number_density**, and 
+By default, the following info items are activated: **total_cells**,
+**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
+**max_grid_level**, **min_number_density**, **max_number_density**, and
 **distance_to_main_clump**.
 
 Clumps can be visualized using the ``annotate_clumps`` callback.
 
 .. code:: python
 
-   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"), 
+   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"),
                            center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -91,7 +91,7 @@
 The center of mass would be the same one as returned by the halo
 finder.  The A, B, C are the largest to smallest magnitude of the
 ellipsoid's semi-principle axes. "e0" is the largest semi-principle
-axis vector direction that would have magnitude A but normalized.  
+axis vector direction that would have magnitude A but normalized.
 The "tilt" is an angle measured in radians.  It can be best described
 as after the rotation about the z-axis to align e0 to x in the x-y
 plane, and then rotating about the y-axis to align e0 completely to
@@ -128,7 +128,7 @@
 Since this is a first attempt, there are many drawbacks and corners
 cut.  Many things listed here will be amended when I have time.
 
-* The ellipsoid 3D container like the boolean object, do not contain 
+* The ellipsoid 3D container like the boolean object, do not contain
   particle position and velocity information.
 * This currently assumes periodic boundary conditions, so if an
   ellipsoid center is at the edge, it will return part of the opposite
@@ -136,7 +136,7 @@
   periodicity in the future.
 * This method gives a minimalistic ellipsoid centered around the
   center of mass that contains all the particles, but sometimes people
-  prefer an inertial tensor triaxial ellipsoid described in 
+  prefer an inertial tensor triaxial ellipsoid described in
   `Dubinski, Carlberg 1991
   <http://adsabs.harvard.edu/abs/1991ApJ...378..496D>`_.  I have that
   method composed but it is not fully tested yet.

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -7,21 +7,21 @@
 ----------------------
 
 In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought 
+merger tree creation, and individual halo analysis) are all brought
 together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is 
-entirely backward incompatible.  
+different from the halo analysis machinery available in yt-2.x and is
+entirely backward incompatible.
 For a direct translation of various halo analysis tasks using yt-2.x
 to yt-3.0 please see :ref:`halo-transition`.
 
-A catalog of halos can be created from any initial dataset given to halo 
+A catalog of halos can be created from any initial dataset given to halo
 catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`. 
-For more details on the relative differences between these halo finders see 
+use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
+For more details on the relative differences between these halo finders see
 :ref:`halo_finding`.
 
-The class which holds all of the halo information is the 
+The class which holds all of the halo information is the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
@@ -32,11 +32,11 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
-A halo catalog may also be created from already run rockstar outputs. 
-This method is not implemented for previously run friends-of-friends or 
-HOP finders. Even though rockstar creates one file per processor, 
-specifying any one file allows the full catalog to be loaded. Here we 
-only specify the file output by the processor with ID 0. Note that the 
+A halo catalog may also be created from already run rockstar outputs.
+This method is not implemented for previously run friends-of-friends or
+HOP finders. Even though rockstar creates one file per processor,
+specifying any one file allows the full catalog to be loaded. Here we
+only specify the file output by the processor with ID 0. Note that the
 argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
 
 .. code-block:: python
@@ -44,10 +44,10 @@
    halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
    hc = HaloCatalog(halos_ds=halos_ds)
 
-Although supplying only the binary output of the rockstar halo finder 
-is sufficient for creating a halo catalog, it is not possible to find 
-any new information about the identified halos. To associate the halos 
-with the dataset from which they were found, supply arguments to both 
+Although supplying only the binary output of the rockstar halo finder
+is sufficient for creating a halo catalog, it is not possible to find
+any new information about the identified halos. To associate the halos
+with the dataset from which they were found, supply arguments to both
 halos_ds and data_ds.
 
 .. code-block:: python
@@ -56,14 +56,14 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
-A data object can also be supplied via the keyword ``data_source``, 
-associated with either dataset, to control the spatial region in 
+A data object can also be supplied via the keyword ``data_source``,
+associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
 
 Analysis Using Halo Catalogs
 ----------------------------
 
-Analysis is done by adding actions to the 
+Analysis is done by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
@@ -73,18 +73,18 @@
 * Callbacks
 * Recipes
 
-A list of all available filters, quantities, and callbacks can be found in 
-:ref:`halo_analysis_ref`.  
-All interaction with this analysis can be performed by importing from 
+A list of all available filters, quantities, and callbacks can be found in
+:ref:`halo_analysis_ref`.
+All interaction with this analysis can be performed by importing from
 halo_analysis.
 
 Filters
 ^^^^^^^
 
-A filter is a function that returns True or False. If the return value 
-is True, any further queued analysis will proceed and the halo in 
-question will be added to the final catalog. If the return value False, 
-further analysis will not be performed and the halo will not be included 
+A filter is a function that returns True or False. If the return value
+is True, any further queued analysis will proceed and the halo in
+question will be added to the final catalog. If the return value is False,
+further analysis will not be performed and the halo will not be included
 in the final catalog.
 
 An example of adding a filter:
@@ -93,11 +93,11 @@
 
    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
 
-Currently quantity_value is the only available filter, but more can be 
-added by the user by defining a function that accepts a halo object as 
-the first argument and then adding it as an available filter. If you 
-think that your filter may be of use to the general community, you can 
-add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a 
+Currently quantity_value is the only available filter, but more can be
+added by the user by defining a function that accepts a halo object as
+the first argument and then adding it as an available filter. If you
+think that your filter may be of use to the general community, you can
+add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a
 pull request.
 
 An example of defining your own filter:
@@ -105,11 +105,11 @@
 .. code-block:: python
 
    def my_filter_function(halo):
-       
+
        # Define condition for filter
        filter_value = True
-       
-       # Return a boolean value 
+
+       # Return a boolean value
        return filter_value
 
    # Add your filter to the filter registry
@@ -121,17 +121,17 @@
 Quantities
 ^^^^^^^^^^
 
-A quantity is a call back that returns a value or values. The return values 
-are stored within the halo object in a dictionary called “quantities.” At 
-the end of the analysis, all of these quantities will be written to disk as 
+A quantity is a call back that returns a value or values. The return values
+are stored within the halo object in a dictionary called “quantities.” At
+the end of the analysis, all of these quantities will be written to disk as
 the final form of the generated halo catalog.
 
-Quantities may be available in the initial fields found in the halo catalog, 
-or calculated from a function after supplying a definition. An example 
-definition of center of mass is shown below. Currently available quantities 
-are center_of_mass and bulk_velocity. Their definitions are available in 
-``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
-your quantity may be of use to the general community, add it to 
+Quantities may be available in the initial fields found in the halo catalog,
+or calculated from a function after supplying a definition. An example
+definition of center of mass is shown below. Currently available quantities
+are center_of_mass and bulk_velocity. Their definitions are available in
+``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that
+your quantity may be of use to the general community, add it to
 ``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
 
 * ``particle_identifier`` -- Halo ID (e.g. 0 to N)
@@ -154,7 +154,7 @@
    def my_quantity_function(halo):
        # Define quantity to return
        quantity = 5
-       
+
        return quantity
 
    # Add your filter to the filter registry
@@ -162,9 +162,9 @@
 
 
    # ... Later on in your script
-   hc.add_quantity("my_quantity") 
+   hc.add_quantity("my_quantity")
 
-This quantity will then be accessible for functions called later via the 
+This quantity will then be accessible for functions called later via the
 *quantities* dictionary that is associated with the halo object.
 
 .. code-block:: python
@@ -179,23 +179,23 @@
 Callbacks
 ^^^^^^^^^
 
-A callback is actually the super class for quantities and filters and 
-is a general purpose function that does something, anything, to a Halo 
-object. This can include hanging new attributes off the Halo object, 
-performing analysis and writing to disk, etc. A callback does not return 
+A callback is actually the super class for quantities and filters and
+is a general purpose function that does something, anything, to a Halo
+object. This can include hanging new attributes off the Halo object,
+performing analysis and writing to disk, etc. A callback does not return
 anything.
 
-An example of using a pre-defined callback where we create a sphere for 
+An example of using a pre-defined callback where we create a sphere for
 each halo with a radius that is twice the saved ``radius``.
 
 .. code-block:: python
 
    hc.add_callback("sphere", factor=2.0)
-    
-Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
-be added by using the syntax shown below. If you think that your 
-callback may be of use to the general community, add it to 
+
+Currently available callbacks are located in
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may
+be added by using the syntax shown below. If you think that your
+callback may be of use to the general community, add it to
 halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
@@ -261,37 +261,37 @@
 Running Analysis
 ----------------
 
-After all callbacks, quantities, and filters have been added, the 
+After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
 
 .. code-block:: python
 
    hc.create()
 
-The save_halos keyword determines whether the actual Halo objects 
-are saved after analysis on them has completed or whether just the 
-contents of their quantities dicts will be retained for creating the 
-final catalog. The looping over halos uses a call to parallel_objects 
-allowing the user to control how many processors work on each halo. 
-The final catalog is written to disk in the output directory given 
-when the 
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
+The save_halos keyword determines whether the actual Halo objects
+are saved after analysis on them has completed or whether just the
+contents of their quantities dicts will be retained for creating the
+final catalog. The looping over halos uses a call to parallel_objects
+allowing the user to control how many processors work on each halo.
+The final catalog is written to disk in the output directory given
+when the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 object was created.
 
-All callbacks, quantities, and filters are stored in an actions list, 
-meaning that they are executed in the same order in which they were added. 
-This enables the use of simple, reusable, single action callbacks that 
-depend on each other. This also prevents unnecessary computation by allowing 
-the user to add filters at multiple stages to skip remaining analysis if it 
+All callbacks, quantities, and filters are stored in an actions list,
+meaning that they are executed in the same order in which they were added.
+This enables the use of simple, reusable, single action callbacks that
+depend on each other. This also prevents unnecessary computation by allowing
+the user to add filters at multiple stages to skip remaining analysis if it
 is not warranted.
 
 Saving and Reloading Halo Catalogs
 ----------------------------------
 
-A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
-saved to disk can be reloaded as a yt dataset with the 
-standard call to load. Any side data, such as profiles, can be reloaded 
-with a ``load_profiles`` callback and a call to 
+A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+saved to disk can be reloaded as a yt dataset with the
+standard call to load. Any side data, such as profiles, can be reloaded
+with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
 .. code-block:: python
@@ -306,5 +306,5 @@
 Worked Example of Halo Catalog in Action
 ----------------------------------------
 
-For a full example of how to use these methods together see 
+For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -3,16 +3,16 @@
 Halo Finding
 ============
 
-There are three methods of finding particle haloes in yt. The 
-default method is called HOP, a method described 
-in `Eisenstein and Hut (1998) 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic 
-friends-of-friends (e.g. `Efstathiou et al. (1985) 
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo 
-finder is also implemented. Finally Rockstar (`Behroozi et a. 
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is 
-a 6D-phase space halo finder developed by Peter Behroozi that 
-excels in finding subhalos and substrcture, but does not allow 
+There are three methods of finding particle haloes in yt. The
+default method is called HOP, a method described
+in `Eisenstein and Hut (1998)
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
+friends-of-friends (e.g. `Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
+finder is also implemented. Finally Rockstar (`Behroozi et al.
+(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
+a 6D-phase space halo finder developed by Peter Behroozi that
+excels in finding subhalos and substructure, but does not allow
 multiple particle masses.
 
 .. _hop:
@@ -20,32 +20,32 @@
 HOP
 ---
 
-The version of HOP used in yt is an upgraded version of the 
-`publicly available HOP code 
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support 
-for 64-bit floats and integers has been added, as well as 
-parallel analysis through spatial decomposition. HOP builds 
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
 groups in this fashion:
 
-#. Estimates the local density at each particle using a 
+#. Estimates the local density at each particle using a
    smoothing kernel.
 
-#. Builds chains of linked particles by 'hopping' from one 
-   particle to its densest neighbor. A particle which is 
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
    its own densest neighbor is the end of the chain.
 
-#. All chains that share the same densest particle are 
+#. All chains that share the same densest particle are
    grouped together.
 
-#. Groups are included, linked together, or discarded 
+#. Groups are included, linked together, or discarded
    depending on the user-supplied over density
    threshold parameter. The default is 160.0.
 
 Please see the `HOP method paper 
 <http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
 full details and the 
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
+documentation.
 
 .. _fof:
 
@@ -53,36 +53,36 @@
 ---
 
 A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
+documentation.
 
 .. _rockstar:
 
 Rockstar Halo Finding
 ---------------------
 
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends 
-groups in six phase-space dimensions and one time dimension, which 
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
 allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt, 
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead 
+resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
 developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_. 
-In order to run the Rockstar halo finder in yt, make sure you've 
+et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
 :ref:`installed it so that it can integrate with yt <rockstar-installation>`.
 
-At the moment, Rockstar does not support multiple particle masses, 
-instead using a fixed particle mass. This will not affect most dark matter 
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
 simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the 
+mass. In simulations where the highest-resolution particles all have the
 same mass (ie: zoom-in grid based simulations), one can set up a particle
 filter to select the lowest mass particles and perform the halo finding
-only on those.  See the this cookbook recipe for an example: 
+only on those.  See this cookbook recipe for an example:
 :ref:`cookbook-rockstar-nested-grid`.
 
-To run the Rockstar Halo finding, you must launch python with MPI and 
-parallelization enabled. While Rockstar itself does not require MPI to run, 
-the MPI libraries allow yt to distribute particle information across multiple 
+To run the Rockstar Halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
 nodes.
 
 .. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
@@ -92,23 +92,23 @@
    For example, here is how Rockstar might be called using 24 cores:
    ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
 
-The script above configures the Halo finder, launches a server process which 
-disseminates run information and coordinates writer-reader processes. 
-Afterwards, it launches reader and writer tasks, filling the available MPI 
-slots, which alternately read particle information and analyze for halo 
+The script above configures the Halo finder, launches a server process which
+disseminates run information and coordinates writer-reader processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
 content.
 
-The RockstarHaloFinder class has these options that can be supplied to the 
+The RockstarHaloFinder class has these options that can be supplied to the
 halo catalog through the ``finder_kwargs`` argument:
 
-* ``dm_type``, the index of the dark matter particle. Default is 1. 
+* ``dm_type``, the index of the dark matter particle. Default is 1.
 * ``outbase``, This is where the out*list files that Rockstar makes should be
   placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the 
+* ``num_readers``, the number of reader tasks (which are idle most of the
   time.) Default is 1.
 * ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1. 
-  If left undefined, the above options are automatically 
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
   configured from the number of available MPI tasks.
 * ``force_res``, the resolution that Rockstar uses for various calculations
   and smoothing lengths. This is in units of Mpc/h.
@@ -130,14 +130,14 @@
   this option can save disk access time if there are no star particles
   (or other non-dark matter particles) in the simulation. Default: ``False``.
 
-Rockstar dumps halo information in a series of text (halo*list and 
-out*list) and binary (halo*bin) files inside the ``outbase`` directory. 
-We use the halo list classes to recover the information. 
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
 
 Inside the ``outbase`` directory there is a text file named ``datasets.txt``
 that records the connection between ds names and the Rockstar file names.
 
-For more information, see the 
+For more information, see the
 :class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
 :class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
 
@@ -146,9 +146,9 @@
 Parallel HOP and FOF
 --------------------
 
-Both the HOP and FoF halo finders can run in parallel using simple 
-spatial decomposition. In order to run them in parallel it is helpful 
-to understand how it works. Below in the first plot (i) is a simplified 
+Both the HOP and FoF halo finders can run in parallel using simple
+spatial decomposition. In order to run them in parallel it is helpful
+to understand how it works. Below in the first plot (i) is a simplified
 depiction of three haloes labeled 1,2 and 3:
 
 .. image:: _images/ParallelHaloFinder.png
@@ -156,35 +156,35 @@
 
 Halo 3 is twice reflected around the periodic boundary conditions.
 
-In (ii), the volume has been sub-divided into four equal subregions, 
-A,B,C and D, shown with dotted lines. Notice that halo 2 is now in 
-two different subregions, C and D, and that halo 3 is now in three, 
+In (ii), the volume has been sub-divided into four equal subregions,
+A,B,C and D, shown with dotted lines. Notice that halo 2 is now in
+two different subregions, C and D, and that halo 3 is now in three,
 A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 is be identified as a single halo, but haloes 2 and 3 are split 
-up into multiple haloes, which is incorrect. The solution is to give 
+halo 1 is identified as a single halo, but haloes 2 and 3 are split
+up into multiple haloes, which is incorrect. The solution is to give
 each subregion padding to oversample into neighboring regions.
 
-In (iii), subregion C has oversampled into the other three regions, 
-with the periodic boundary conditions taken into account, shown by 
+In (iii), subregion C has oversampled into the other three regions,
+with the periodic boundary conditions taken into account, shown by
 dot-dashed lines. The other subregions oversample in a similar way.
 
-The halo finder is then run on each padded subregion independently 
-and simultaneously. By oversampling like this, haloes 2 and 3 will 
-both be enclosed fully in at least one subregion and identified 
+The halo finder is then run on each padded subregion independently
+and simultaneously. By oversampling like this, haloes 2 and 3 will
+both be enclosed fully in at least one subregion and identified
 completely.
 
-Haloes identified with centers of mass inside the padded part of a 
-subregion are thrown out, eliminating the problem of halo duplication. 
+Haloes identified with centers of mass inside the padded part of a
+subregion are thrown out, eliminating the problem of halo duplication.
 The centers for the three haloes are shown with stars. Halo 1 will
 belong to subregion A, 2 to C and 3 to B.
 
-To run with parallel halo finding, you must supply a value for 
-padding in the finder_kwargs argument. The ``padding`` parameter 
-is in simulation units and defaults to 0.02. This parameter is how 
-much padding is added to each of the six sides of a subregion. 
-This value should be 2x-3x larger than the largest expected halo 
-in the simulation. It is unlikely, of course, that the largest 
-object in the simulation will be on a subregion boundary, but there 
+To run with parallel halo finding, you must supply a value for
+padding in the finder_kwargs argument. The ``padding`` parameter
+is in simulation units and defaults to 0.02. This parameter is how
+much padding is added to each of the six sides of a subregion.
+This value should be 2x-3x larger than the largest expected halo
+in the simulation. It is unlikely, of course, that the largest
+object in the simulation will be on a subregion boundary, but there
 is no way of knowing before the halo finder is run.
 
 .. code-block:: python
@@ -197,10 +197,10 @@
   # --or--
   hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
 
-In general, a little bit of padding goes a long way, and too much 
-just slows down the analysis and doesn't improve the answer (but 
-doesn't change it).  It may be worth your time to run the parallel 
-halo finder at a few paddings to find the right amount, especially 
+In general, a little bit of padding goes a long way, and too much
+just slows down the analysis and doesn't improve the answer (but
+doesn't change it).  It may be worth your time to run the parallel
+halo finder at a few paddings to find the right amount, especially
 if you're analyzing many similar datasets.
 
 .. _rockstar-installation:
@@ -209,15 +209,15 @@
 ---------------------
 
 Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar.  This version of Rockstar has been 
-slightly patched and modified to run as a library inside of yt. By default it 
-is not installed with yt, but installation is very easy.  The 
-:ref:`install-script` used to install yt from source has a line: 
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
 ``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
 rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.  
+it will only install components missing from the existing installation.
 You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of 
+the parent directory of the yt installation directory (e.g. the parent of
 ``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
 
 .. code-block:: bash

diff -r 209f3d0b966518dac95cff141d997d967d375ee5 -r d8eec89b2c86f300ce9cfb0205b97cefb5dd0c45 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -11,21 +11,21 @@
 General Overview
 ----------------
 
-A halo mass function can be created for the halos identified in a cosmological 
+A halo mass function can be created for the halos identified in a cosmological
 simulation, as well as analytic fits using any arbitrary set of cosmological
 parameters. In order to create a mass function for simulated halos, they must
-first be identified (using HOP, FOF, or Rockstar, see 
+first be identified (using HOP, FOF, or Rockstar, see
 :ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
 halo masses will then be found, and can be compared to the analytic prediction
 at the same redshift and using the same cosmological parameters as were used
 in the simulation. Care should be taken in this regard, as the analytic fit
-requires the specification of cosmological parameters that are not necessarily 
+requires the specification of cosmological parameters that are not necessarily
 stored in the halo or simulation datasets, and must be specified by the user.
-Efforts have been made to set reasonable defaults for these parameters, but 
+Efforts have been made to set reasonable defaults for these parameters, but
 setting them to identically match those used in the simulation will produce a
 much better comparison.
 
-Analytic halo mass functions can also be created without a halo dataset by 
+Analytic halo mass functions can also be created without a halo dataset by
 providing either a simulation dataset or specifying cosmological parameters by
 hand. yt includes 5 analytic fits for the halo mass function which can be
 selected.
@@ -65,8 +65,8 @@
 
 This will create a HaloMassFcn object off of which arrays holding the information
 about the analytic mass function hang. Creating the halo mass function for a set
-of simulated halos requires only the loaded halo dataset to be passed as an 
-argument. This also creates the analytic mass function using all parameters that 
+of simulated halos requires only the loaded halo dataset to be passed as an
+argument. This also creates the analytic mass function using all parameters that
 can be extracted from the halo dataset, at the same redshift, spanning a similar
 range of halo masses.
 
@@ -78,7 +78,7 @@
   my_halos = load("rockstar_halos/halos_0.0.bin")
   hmf = HaloMassFcn(halos_ds=my_halos)
 
-A simulation dataset can be passed along with additional cosmological parameters 
+A simulation dataset can be passed along with additional cosmological parameters
 to create an analytic mass function.
 
 .. code-block:: python
@@ -87,10 +87,10 @@
   from yt.analysis_modules.halo_mass_function.api import *
 
   my_ds = load("RD0027/RedshiftOutput0027")
-  hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96, 
+  hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96,
                     sigma8 = 0.8, log_mass_min=5, log_mass_max=9)
 
-The analytic mass function can be created for a set of arbitrary cosmological 
+The analytic mass function can be created for a set of arbitrary cosmological
 parameters without any dataset being passed as an argument.
 
 .. code-block:: python
@@ -98,7 +98,7 @@
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
 
-  hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
+  hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27,
                     omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
                     log_mass_min=5, log_mass_max=9, fitting_function=5)
 
@@ -110,95 +110,95 @@
   Default : None.
 
 * **halos_ds** (*Halo dataset object*)
-  The halos from a simulation to be used for creation of the 
+  The halos from a simulation to be used for creation of the
   halo mass function in the simulation.
   Default : None.
 
 * **make_analytic** (*bool*)
-  Whether or not to calculate the analytic mass function to go with 
-  the simulated halo mass function.  Automatically set to true if a 
+  Whether or not to calculate the analytic mass function to go with
+  the simulated halo mass function.  Automatically set to true if a
   simulation dataset is provided.
   Default : True.
 
 * **omega_matter0** (*float*)
-  The fraction of the universe made up of matter (dark and baryonic). 
+  The fraction of the universe made up of matter (dark and baryonic).
   Default : 0.2726.
 
 * **omega_lambda0** (*float*)
-  The fraction of the universe made up of dark energy. 
+  The fraction of the universe made up of dark energy.
   Default : 0.7274.
 
 * **omega_baryon0**  (*float*)
-  The fraction of the universe made up of baryonic matter. This is not 
+  The fraction of the universe made up of baryonic matter. This is not
   always stored in the dataset and should be checked by hand.
   Default : 0.0456.
 
 * **hubble0** (*float*)
-  The expansion rate of the universe in units of 100 km/s/Mpc. 
+  The expansion rate of the universe in units of 100 km/s/Mpc.
   Default : 0.704.
 
 * **sigma8** (*float*)
-  The amplitude of the linear power spectrum at z=0 as specified by 
-  the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
-  8 Mpc/h. This is not always stored in the dataset and should be 
+  The amplitude of the linear power spectrum at z=0 as specified by
+  the rms amplitude of mass-fluctuations in a top-hat sphere of radius
+  8 Mpc/h. This is not always stored in the dataset and should be
   checked by hand.
   Default : 0.86.
 
 * **primordial_index** (*float*)
-  This is the index of the mass power spectrum before modification by 
-  the transfer function. A value of 1 corresponds to the scale-free 
-  primordial spectrum. This is not always stored in the dataset and 
+  This is the index of the mass power spectrum before modification by
+  the transfer function. A value of 1 corresponds to the scale-free
+  primordial spectrum. This is not always stored in the dataset and
   should be checked by hand.
   Default : 1.0.
 
 * **this_redshift** (*float*)
-  The current redshift. 
+  The current redshift.
   Default : 0.
 
 * **log_mass_min** (*float*)
   The log10 of the mass of the minimum of the halo mass range. This is
-  set automatically by the range of halo masses if a simulated halo 
+  set automatically by the range of halo masses if a simulated halo
  dataset is provided. If a halo dataset is not provided and no value
   is specified, it will be set to 5. Units: M_solar
   Default : None.
 
 * **log_mass_max** (*float*)
   The log10 of the mass of the maximum of the halo mass range. This is
-  set automatically by the range of halo masses if a simulated halo 
+  set automatically by the range of halo masses if a simulated halo
  dataset is provided. If a halo dataset is not provided and no value
   is specified, it will be set to 16. Units: M_solar
   Default : None.
 
 * **num_sigma_bins** (*float*)
-  The number of bins (points) to use for the calculation of the 
-  analytic mass function. 
+  The number of bins (points) to use for the calculation of the
+  analytic mass function.
   Default : 360.
 
 * **fitting_function** (*int*)
-  Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
+  Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins,
   3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
   Default : 4.
 
 Outputs
 -------
 
-A HaloMassFnc object has several arrays hanging off of it containing the 
+A HaloMassFcn object has several arrays hanging off of it containing the
 
 * **masses_sim**: Halo masses from simulated halos. Units: M_solar
 
-* **n_cumulative_sim**: Number density of halos with mass greater than the 
+* **n_cumulative_sim**: Number density of halos with mass greater than the
   corresponding mass in masses_sim. Units: comoving Mpc^-3
 
-* **masses_analytic**: Masses used for the generation of the analytic mass 
+* **masses_analytic**: Masses used for the generation of the analytic mass
   function. Units: M_solar
 
-* **n_cumulative_analytic**: Number density of halos with mass greater then 
+* **n_cumulative_analytic**: Number density of halos with mass greater than
   the corresponding mass in masses_analytic. Units: comoving Mpc^-3
 
 * **dndM_dM_analytic**: Differential number density of halos, (dn/dM)*dM.
 
 After the mass function has been created for both simulated halos and the
-corresponding analytic fits, they can be plotted though something along the 
+corresponding analytic fits, they can be plotted through something along the
 lines of
 
 .. code-block:: python
@@ -213,7 +213,7 @@
   plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
   plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
 
-Attached to ``hmf`` is the convenience function ``write_out``, which saves the 
+Attached to ``hmf`` is the convenience function ``write_out``, which saves the
 halo mass function to a text file. (continued from above)
 .. code-block:: python
 

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.spacepope.org/pipermail/yt-svn-spacepope.org/attachments/20160414/708f769c/attachment-0001.htm>


More information about the yt-svn mailing list