[yt-svn] commit/yt-3.0: 31 new changesets
commits-noreply at bitbucket.org
Fri Sep 6 08:48:56 PDT 2013
31 new commits in yt-3.0:
https://bitbucket.org/yt_analysis/yt-3.0/commits/7326a8d3469f/
Changeset: 7326a8d3469f
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-29 00:44:45
Summary: Adding OctList object and beginning a neighbor search.
Affected #: 3 files
diff -r b95e8f2a3c0e10f8b10c60b606886c54523a9df9 -r 7326a8d3469f1377e7604c5454a65876cd130f26 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -40,6 +40,8 @@
cdef struct OctInfo:
np.float64_t left_edge[3]
np.float64_t dds[3]
+ np.int64_t ipos[3]
+ np.int32_t level
cdef struct OctAllocationContainer
cdef struct OctAllocationContainer:
@@ -49,6 +51,16 @@
OctAllocationContainer *next
Oct *my_octs
+cdef struct OctList
+
+cdef struct OctList:
+ OctList *next
+ Oct *o
+
+cdef OctList *OctList_append(OctList *list, Oct *o)
+cdef int OctList_count(OctList *list)
+cdef void OctList_delete(OctList *list)
+
cdef class OctreeContainer:
cdef OctAllocationContainer *cont
cdef OctAllocationContainer **domains
@@ -60,7 +72,7 @@
cdef public int max_domain
cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
cdef int get_root(self, int ind[3], Oct **o)
- cdef void neighbors(self, Oct *, Oct **)
+ cdef int neighbors(self, OctInfo *oinfo, Oct **neighbors)
cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
# This function must return the offset from global-to-local domains; i.e.,
# OctAllocationContainer.offset if such a thing exists.
diff -r b95e8f2a3c0e10f8b10c60b606886c54523a9df9 -r 7326a8d3469f1377e7604c5454a65876cd130f26 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -190,18 +190,22 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
+ cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
+ ):
#Given a floating point position, retrieve the most
#refined oct at that time
- cdef int ind[3]
+ cdef int ind[3], level
+ cdef np.int64_t ipos[3]
cdef np.float64_t dds[3], cp[3], pp[3]
cdef Oct *cur, *next
+ cdef int i
cur = next = NULL
- cdef int i
+ level = 0
for i in range(3):
dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+ ipos[i] = ind[i]
self.get_root(ind, &next)
# We want to stop recursing when there's nowhere else to go
while next != NULL:
@@ -216,6 +220,9 @@
cp[i] += dds[i]/2.0
if cur.children != NULL:
next = cur.children[cind(ind[0],ind[1],ind[2])]
+ for i in range(3):
+ ipos[i] = (ipos[i] << 1) + ind[i]
+ level += 1
else:
next = NULL
if oinfo == NULL: return cur
@@ -230,6 +237,8 @@
# oct width, thus making it already the cell width
oinfo.dds[i] = dds[i] # Cell width
oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
+ oinfo.ipos[i] = ipos[i]
+ oinfo.level = level
return cur
def domain_identify(self, SelectorObject selector):
@@ -249,99 +258,10 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef void neighbors(self, Oct* o, Oct* neighbors[27]):
- #Get 3x3x3 neighbors, although the 1,1,1 oct is the
- #central one.
- #Return an array of Octs
- cdef np.int64_t curopos[3]
- cdef np.int64_t curnpos[3]
- cdef np.int64_t npos[3]
- cdef int i, j, k, ni, nj, nk, ind[3], nn, dl, skip
- cdef np.float64_t dds[3], cp[3], pp[3]
+ cdef int neighbors(self, OctInfo *oinfo, Oct** neighbors):
cdef Oct* candidate
- for i in range(27): neighbors[i] = NULL
nn = 0
- raise RuntimeError
- #for ni in range(3):
- # for nj in range(3):
- # for nk in range(3):
- # if ni == nj == nk == 1:
- # neighbors[nn] = o
- # nn += 1
- # continue
- # npos[0] = o.pos[0] + (ni - 1)
- # npos[1] = o.pos[1] + (nj - 1)
- # npos[2] = o.pos[2] + (nk - 1)
- # for i in range(3):
- # # Periodicity
- # if npos[i] == -1:
- # npos[i] = (self.nn[i] << o.level) - 1
- # elif npos[i] == (self.nn[i] << o.level):
- # npos[i] = 0
- # curopos[i] = o.pos[i]
- # curnpos[i] = npos[i]
- # # Now we have our neighbor position and a safe place to
- # # keep it. curnpos will be the root index of the neighbor
- # # at a given level, and npos will be constant. curopos is
- # # the candidate root at a level.
- # candidate = o
- # while candidate != NULL:
- # if ((curopos[0] == curnpos[0]) and
- # (curopos[1] == curnpos[1]) and
- # (curopos[2] == curnpos[2])):
- # break
- # # This one doesn't meet it, so we pop up a level.
- # # First we update our positions, then we update our
- # # candidate.
- # for i in range(3):
- # # We strip a digit off the right
- # curopos[i] = (curopos[i] >> 1)
- # curnpos[i] = (curnpos[i] >> 1)
- # # Now we update to the candidate's parent, which should
- # # have a matching position to curopos[]
- # # TODO: This has not survived the transition to
- # # mostly-stateless Octs!
- # raise RuntimeError
- # candidate = candidate.parent
- # if candidate == NULL:
- # # Worst case scenario
- # for i in range(3):
- # ind[i] = (npos[i] >> (o.level))
- # candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
- # # Now we have the common root, which may be NULL
- # while candidate.level < o.level:
- # dl = o.level - (candidate.level + 1)
- # for i in range(3):
- # ind[i] = (npos[i] >> dl) & 1
- # if candidate.children[cind(ind[0],ind[1],ind[2])] \
- # == NULL:
- # break
- # candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
- # neighbors[nn] = candidate
- # nn += 1
-
- @cython.boundscheck(False)
- @cython.wraparound(False)
- @cython.cdivision(True)
- def get_neighbor_boundaries(self, oppos):
- cdef int i, ii
- cdef np.float64_t ppos[3]
- for i in range(3):
- ppos[i] = oppos[i]
- cdef Oct *main = self.get(ppos)
- cdef Oct* neighbors[27]
- self.neighbors(main, neighbors)
- cdef np.ndarray[np.float64_t, ndim=2] bounds
- cdef np.float64_t corner[3], size[3]
- bounds = np.zeros((27,6), dtype="float64")
- tnp = 0
- raise RuntimeError
- for i in range(27):
- self.oct_bounds(neighbors[i], corner, size)
- for ii in range(3):
- bounds[i, ii] = corner[ii]
- bounds[i, 3+ii] = size[ii]
- return bounds
+ return 0
@cython.boundscheck(False)
@cython.wraparound(False)
@@ -786,3 +706,26 @@
dest[local_filled + offset] = source[ox,oy,oz]
local_filled += 1
return local_filled
+
+cdef OctList *OctList_append(OctList *list, Oct *o):
+ cdef OctList *this = list
+ while this.next != NULL:
+ this = this.next
+ this.next = <OctList*> malloc(sizeof(OctList))
+ this.next.o = o
+ return this.next
+
+cdef int OctList_count(OctList *list):
+ cdef OctList *this = list
+ cdef int i = 0 # Count the list
+ while this != NULL:
+ i += 1
+ this = this.next
+ return i
+
+cdef void OctList_delete(OctList *list):
+ cdef OctList *next, *this = list
+ while this != NULL:
+ next = this.next
+ free(this)
+ this = next
diff -r b95e8f2a3c0e10f8b10c60b606886c54523a9df9 -r 7326a8d3469f1377e7604c5454a65876cd130f26 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -3,7 +3,7 @@
Author: Matthew Turk <matthewturk at gmail.com>
Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
License:
Copyright (C) 2013 Matthew Turk. All Rights Reserved.
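The OctList added in this changeset is a bare singly-linked list of Oct pointers with append, count, and delete helpers operating on malloc'd nodes. As an illustrative sketch only (plain Python with hypothetical names, not the Cython code itself), the append/count pattern looks like this; the empty-list case is handled here the way a later revision of OctList_append in this series handles it:

    class OctList:
        def __init__(self, o):
            self.o = o        # payload; an Oct* in the Cython version
            self.next = None  # next node, or None at the tail

    def octlist_append(head, o):
        # Append a new node and return (head, new_tail).
        node = OctList(o)
        if head is None:
            return node, node
        tail = head
        while tail.next is not None:
            tail = tail.next
        tail.next = node
        return head, node

    def octlist_count(head):
        # Walk the list and count nodes, mirroring OctList_count.
        n = 0
        while head is not None:
            n += 1
            head = head.next
        return n

Deletion is implicit in Python; the Cython OctList_delete walks the list and frees each node explicitly.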
https://bitbucket.org/yt_analysis/yt-3.0/commits/f13ec690598b/
Changeset: f13ec690598b
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-29 01:36:17
Summary: Implement first draft of neighbor-finding.
Affected #: 1 file
diff -r 7326a8d3469f1377e7604c5454a65876cd130f26 -r f13ec690598b6e11068438e7ae099ae8caf6b15c yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -261,7 +261,50 @@
cdef int neighbors(self, OctInfo *oinfo, Oct** neighbors):
cdef Oct* candidate
nn = 0
- return 0
+ # We are going to do a brute-force search here.
+ # This is not the most efficient -- in fact, it's relatively bad. But
+ # we will attempt to improve it in a future iteration, where we will
+ # grow a stack of parent Octs.
+ # Note that in the first iteration, we will just find the up-to-27
+ # neighbors, including the main oct.
+ cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+ cdef OctList *olist, *my_list
+ my_list = olist = NULL
+ cdef Oct *cand
+ cdef np.int64_t npos[3]
+ for i in range(3):
+ npos[0] = oinfo.ipos[0] + (1 - i)
+ for j in range(3):
+ nj = 1 - j
+ npos[1] = oinfo.ipos[1] + (1 - j)
+ for k in range(3):
+ nk = 1 - k
+ npos[2] = oinfo.ipos[2] + (1 - k)
+ # Now we have our npos, which we just need to find.
+ cand = NULL
+ for level in range(oinfo.level + 1):
+ for n in range(3):
+ ind[n] = ((npos[n] >> (oinfo.level - level)) & 1)
+ if level == 0:
+ self.get_root(ind, &cand)
+ if cand == NULL: break
+ continue
+ if cand.children == NULL: break
+ ii = cind(ind[0],ind[1],ind[2])
+ if cand.children[ii] == NULL: break
+ cand = cand.children[ii]
+ if cand != NULL:
+ nfound += 1
+ olist = OctList_append(olist, cand)
+ if my_list == NULL: my_list = olist
+ olist = my_list
+ cdef int noct = OctList_count(olist)
+ neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
+ for i in range(noct):
+ neighbors[i] = olist.o
+ olist = olist.next
+ OctList_delete(my_list)
+ return noct
@cython.boundscheck(False)
@cython.wraparound(False)
@@ -707,24 +750,31 @@
local_filled += 1
return local_filled
-cdef OctList *OctList_append(OctList *list, Oct *o):
- cdef OctList *this = list
+cdef OctList *OctList_append(OctList *olist, Oct *o):
+ cdef OctList *this = olist
+ if olist == NULL:
+ this = <OctList *> malloc(sizeof(OctList))
+ this.next = NULL
+ this.o = o
+ return this
while this.next != NULL:
this = this.next
this.next = <OctList*> malloc(sizeof(OctList))
- this.next.o = o
- return this.next
+ this = this.next
+ this.o = o
+ this.next = NULL
+ return this
-cdef int OctList_count(OctList *list):
- cdef OctList *this = list
+cdef int OctList_count(OctList *olist):
+ cdef OctList *this = olist
cdef int i = 0 # Count the list
while this != NULL:
i += 1
this = this.next
return i
-cdef void OctList_delete(OctList *list):
- cdef OctList *next, *this = list
+cdef void OctList_delete(OctList *olist):
+ cdef OctList *next, *this = olist
while this != NULL:
next = this.next
free(this)
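The descent in this first draft peels one bit of the candidate neighbor's integer position per level: a root lookup first, then children followed until the target level or a missing child is reached. A simplified Python sketch of that idea, assuming a hypothetical root_mesh nested list and per-oct 2x2x2 children arrays (indexing details differ from the Cython above):

    def descend(root_mesh, npos, level):
        # npos: integer position of the target oct in units of the level-`level`
        # oct width; the top bits select the root oct, each lower bit a child.
        ind = [npos[n] >> level for n in range(3)]
        cand = root_mesh[ind[0]][ind[1]][ind[2]]
        for l in range(1, level + 1):
            if cand is None or cand.children is None:
                break
            ind = [(npos[n] >> (level - l)) & 1 for n in range(3)]
            child = cand.children[ind[0]][ind[1]][ind[2]]
            if child is None:
                break
            cand = child
        return cand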
https://bitbucket.org/yt_analysis/yt-3.0/commits/02aa58342c33/
Changeset: 02aa58342c33
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-29 23:44:00
Summary: Finishing neighbors() implementation.
Affected #: 1 file
diff -r f13ec690598b6e11068438e7ae099ae8caf6b15c -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -184,8 +184,13 @@
return 0
cdef int get_root(self, int ind[3], Oct **o):
+ cdef int i
+ for i in range(3):
+ if ind[i] < 0 or ind[i] >= self.nn[i]:
+ o[0] = NULL
+ return 1
o[0] = self.root_mesh[ind[0]][ind[1]][ind[2]]
- return 1
+ return 0
@cython.boundscheck(False)
@cython.wraparound(False)
@@ -200,15 +205,18 @@
cdef Oct *cur, *next
cdef int i
cur = next = NULL
- level = 0
+ level = -1
for i in range(3):
dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
- ipos[i] = ind[i]
+ ipos[i] = 0
self.get_root(ind, &next)
# We want to stop recursing when there's nowhere else to go
while next != NULL:
+ level += 1
+ for i in range(3):
+ ipos[i] = (ipos[i] << 1) + ind[i]
cur = next
for i in range(3):
dds[i] = dds[i] / 2.0
@@ -220,9 +228,6 @@
cp[i] += dds[i]/2.0
if cur.children != NULL:
next = cur.children[cind(ind[0],ind[1],ind[2])]
- for i in range(3):
- ipos[i] = (ipos[i] << 1) + ind[i]
- level += 1
else:
next = NULL
if oinfo == NULL: return cur
@@ -258,7 +263,7 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef int neighbors(self, OctInfo *oinfo, Oct** neighbors):
+ cdef int neighbors(self, OctInfo *oi, Oct** neighbors):
cdef Oct* candidate
nn = 0
# We are going to do a brute-force search here.
@@ -271,25 +276,38 @@
cdef OctList *olist, *my_list
my_list = olist = NULL
cdef Oct *cand
- cdef np.int64_t npos[3]
+ cdef np.int64_t npos[3], ndim[3]
+ # Now we get our boundaries for this level, so that we can wrap around
+ # if need be.
for i in range(3):
- npos[0] = oinfo.ipos[0] + (1 - i)
+ ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i])/oi.dds[i])
+ for i in range(3):
+ npos[0] = (oi.ipos[0] + (1 - i))
+ if npos[0] < 0: npos[0] += ndim[0]
+ if npos[0] >= ndim[0]: npos[0] -= ndim[0]
for j in range(3):
nj = 1 - j
- npos[1] = oinfo.ipos[1] + (1 - j)
+ npos[1] = (oi.ipos[1] + (1 - j))
+ if npos[1] < 0: npos[1] += ndim[1]
+ if npos[1] >= ndim[1]: npos[1] -= ndim[1]
for k in range(3):
nk = 1 - k
- npos[2] = oinfo.ipos[2] + (1 - k)
+ npos[2] = (oi.ipos[2] + (1 - k))
+ if npos[2] < 0: npos[2] += ndim[2]
+ if npos[2] >= ndim[2]: npos[2] -= ndim[2]
# Now we have our npos, which we just need to find.
+ # Level 0 gets bootstrapped
+ for n in range(3):
+ ind[n] = ((npos[n] >> (oi.level + 1)) & 1)
cand = NULL
- for level in range(oinfo.level + 1):
+ self.get_root(ind, &cand)
+ # We should not get a NULL if we handle periodicity
+ # correctly, but we might.
+ if cand == NULL: continue
+ for level in range(1, oi.level+1):
+ if cand.children == NULL: break
for n in range(3):
- ind[n] = ((npos[n] >> (oinfo.level - level)) & 1)
- if level == 0:
- self.get_root(ind, &cand)
- if cand == NULL: break
- continue
- if cand.children == NULL: break
+ ind[n] = (npos[n] >> (oi.level - (level + 1))) & 1
ii = cind(ind[0],ind[1],ind[2])
if cand.children[ii] == NULL: break
cand = cand.children[ii]
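This pass adds periodic wrapping of the candidate positions before the descent. A minimal sketch of the 3x3x3 enumeration plus wrap, assuming ndim holds the number of octs per axis at the oct's level (a later changeset in this series adjusts exactly how ndim is computed):

    def neighbor_positions(ipos, ndim):
        # Yield the 27 integer positions surrounding (and including) ipos,
        # wrapped periodically into [0, ndim) on each axis.
        for i in range(3):
            for j in range(3):
                for k in range(3):
                    npos = [ipos[0] + (1 - i),
                            ipos[1] + (1 - j),
                            ipos[2] + (1 - k)]
                    for n in range(3):
                        if npos[n] < 0:
                            npos[n] += ndim[n]
                        elif npos[n] >= ndim[n]:
                            npos[n] -= ndim[n]
                    yield tuple(npos)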
https://bitbucket.org/yt_analysis/yt-3.0/commits/0e78005bde3d/
Changeset: 0e78005bde3d
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-30 02:38:50
Summary: First, untested draft of smoothing support.
Affected #: 5 files
diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -5,7 +5,7 @@
Affiliation: UC Santa Cruz
Author: Matthew Turk <matthewturk at gmail.com>
Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
License:
Copyright (C) 2013 Matthew Turk. All Rights Reserved.
@@ -32,7 +32,7 @@
from libc.math cimport sqrt
from fp_utils cimport *
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
cdef extern from "alloca.h":
void *alloca(int)
@@ -62,8 +62,7 @@
cdef class ParticleDepositOperation:
# We assume each will allocate and define their own temporary storage
cdef public object nvals
- cdef public int bad_indices
- cdef int update_values
+ cdef public int update_values
cdef void process(self, int dim[3], np.float64_t left_edge[3],
np.float64_t dds[3], np.int64_t offset,
np.float64_t ppos[3], np.float64_t *fields,
diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -54,7 +54,6 @@
fields = None, int domain_id = -1,
int domain_offset = 0):
cdef int nf, i, j
- self.bad_indices = 0
if fields is None:
fields = []
nf = len(fields)
diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/particle_smooth.pxd
--- /dev/null
+++ b/yt/geometry/particle_smooth.pxd
@@ -0,0 +1,49 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+ Copyright (C) 2013 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .particle_deposit cimport sph_kernel, gind
+
+cdef extern from "alloca.h":
+ void *alloca(int)
+
+cdef class ParticleSmoothOperation:
+ # We assume each will allocate and define their own temporary storage
+ cdef public object nvals
+ cdef void process(self, int dim[3], np.float64_t left_edge[3],
+ np.float64_t dds[3], np.float64_t *ppos,
+ np.float64_t **fields, np.int64_t nneighbors,
+ np.int64_t *nind, np.int64_t *doffs,
+ np.int64_t *pinds, np.int64_t *pcounts,
+ np.int64_t offset)
diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/particle_smooth.pyx
--- /dev/null
+++ b/yt/geometry/particle_smooth.pyx
@@ -0,0 +1,164 @@
+"""
+Particle smoothing in cells
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2013 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, realloc
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, \
+ OctreeContainer, OctInfo
+
+cdef class ParticleSmoothOperation:
+ def __init__(self, nvals):
+ # This is the set of cells, in grids, blocks or octs, we are handling.
+ self.nvals = nvals
+
+ def initialize(self, *args):
+ raise NotImplementedError
+
+ def finalize(self, *args):
+ raise NotImplementedError
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def process_octree(self, OctreeContainer octree,
+ np.ndarray[np.int64_t, ndim=1] dom_ind,
+ np.ndarray[np.float64_t, ndim=2] positions,
+ fields = None, int domain_id = -1,
+ int domain_offset = 0,
+ int test_neighbors = 0):
+ # This will be a several-step operation.
+ #
+ # We first take all of our particles and assign them to Octs. If they
+ # are not in an Oct, we will assume they are out of bounds. Note that
+ # this means that if we have loaded neighbor particles for which an Oct
+ # does not exist, we are going to be discarding them -- so sparse
+ # octrees will need to ensure that neighbor octs *exist*. Particles
+ # will be assigned in a new NumPy array. Note that this incurs
+ # overhead, but reduces complexity as we will now be able to use
+ # argsort.
+ #
+ # After the particles have been assigned to Octs, we process each Oct
+ # individually. We will do this by calling "get" for the *first*
+ # particle in each set of Octs in the sorted list. After this, we get
+ # neighbors for each Oct.
+ #
+ # Now, with the set of neighbors (and thus their indices) we allocate
+ # an array of particles and their fields, fill these in, and call our
+ # process function.
+ #
+ # This is not terribly efficient -- for starters, the neighbor function
+ # is not the most efficient yet. We will also need to handle some
+ # mechanism of an expandable array for holding pointers to Octs, so
+ # that we can deal with >27 neighbors. As I write this comment,
+ # neighbors() only returns 27 neighbors.
+ cdef int nf, i, j, dims[3]
+ cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos
+ cdef int nsize = 0
+ cdef np.int64_t *nind = NULL
+ cdef OctInfo oi
+ cdef Oct *oct, **neighbors = NULL
+ cdef np.int64_t nneighbors, numpart, offset, moff, local_ind
+ cdef np.int64_t *doffs, *pinds, *pcounts
+ cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+ cdef np.ndarray[np.float64_t, ndim=1] tarr
+ dims[0] = dims[1] = dims[2] = 2
+ numpart = positions.shape[0]
+ pcount = np.zeros_like(dom_ind)
+ doff = np.zeros_like(dom_ind) - 1
+ moff = octree.get_domain_offset(domain_id + domain_offset)
+ pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+ if fields is None:
+ fields = []
+ field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+ for i in range(nf):
+ tarr = fields[i]
+ field_pointers[i] = <np.float64_t *> tarr.data
+ for i in range(positions.shape[0]):
+ for j in range(3):
+ pos[j] = positions[i, j]
+ oct = octree.get(pos)
+ if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+ continue
+ # Note that this has to be our local index, not our in-file index.
+ # This is the particle count, which we'll use once we have sorted
+ # the particles to calculate the offsets into each oct's particles.
+ pcount[oct.domain_ind - moff] += 1
+ pdoms[i] = oct.domain_ind - moff # We store the *actual* offset.
+ # Now we have oct assignments. Let's sort them.
+ # Note that what we will be providing to our processing functions will
+ # actually be indirectly-sorted fields. This preserves memory at the
+ # expense of additional pointer lookups.
+ pind = np.argsort(pdoms)
+ # This now gives us the indices to the particles for each domain.
+ for i in range(positions.shape[0]):
+ # This is the domain_ind (minus moff) for this particle
+ offset = pdoms[pind[i]]
+ if doff[offset] < 0:
+ doff[offset] = i
+ # Now doff is full of offsets to the first entry in the pind that
+ # refers to that oct's particles.
+ ppos = <np.float64_t *> positions.data
+ doffs = <np.int64_t*> doff.data
+ pinds = <np.int64_t*> pind.data
+ pcounts = <np.int64_t*> pcount.data
+ for i in range(doff.shape[0]):
+ for j in range(3):
+ pos[j] = positions[pind[doff[i]], j]
+ oct = octree.get(pos, &oi)
+ if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+ continue
+ offset = dom_ind[oct.domain_ind - moff] * 8
+ nneighbors = octree.neighbors(&oi, neighbors)
+ # Now we have all our neighbors. And, we should be set for what
+ # else we need to do.
+ if nneighbors > nsize:
+ nind = <np.int64_t *> realloc(nind, nneighbors)
+ nsize = nneighbors
+ for j in range(nneighbors):
+ nind[j] = neighbors[j].domain_ind - moff
+ self.process(dims, oi.left_edge, oi.dds,
+ ppos, field_pointers, nneighbors, nind, doffs,
+ pinds, pcounts, offset)
+ if nind != NULL:
+ free(nind)
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def process_grid(self, gobj,
+ np.ndarray[np.float64_t, ndim=2] positions,
+ fields = None):
+ raise NotImplementedError
+
+ cdef void process(self, int dim[3], np.float64_t left_edge[3],
+ np.float64_t dds[3], np.float64_t *ppos,
+ np.float64_t **fields, np.int64_t nneighbors,
+ np.int64_t *nind, np.int64_t *doffs,
+ np.int64_t *pinds, np.int64_t *pcounts,
+ np.int64_t offset):
+ raise NotImplementedError
diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -43,6 +43,15 @@
"yt/geometry/oct_container.pxd",
"yt/geometry/selection_routines.pxd",
"yt/geometry/particle_deposit.pxd"])
+ config.add_extension("particle_smooth",
+ ["yt/geometry/particle_smooth.pyx"],
+ include_dirs=["yt/utilities/lib/"],
+ libraries=["m"],
+ depends=["yt/utilities/lib/fp_utils.pxd",
+ "yt/geometry/oct_container.pxd",
+ "yt/geometry/selection_routines.pxd",
+ "yt/geometry/particle_deposit.pxd",
+ "yt/geometry/particle_smooth.pxd"])
config.add_extension("fake_octree",
["yt/geometry/fake_octree.pyx"],
include_dirs=["yt/utilities/lib/"],
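The long comment in process_octree describes the overall flow: assign each particle to an oct, argsort the assignments, and record per-oct counts and offsets so that each oct's particles can be pulled out of the sorted index array. A hedged NumPy sketch of just that bookkeeping (the octree lookup itself is elided):

    import numpy as np

    def bucket_particles(pdoms, n_octs):
        # pdoms[i] is the local oct index of particle i, or -1 if unassigned.
        pcount = np.zeros(n_octs, dtype="int64")    # particles per oct
        doff = np.zeros(n_octs, dtype="int64") - 1  # first sorted index per oct
        for d in pdoms:
            if d >= 0:
                pcount[d] += 1
        pind = np.argsort(pdoms)                    # particle indices grouped by oct
        for i in range(pind.shape[0]):
            d = pdoms[pind[i]]
            if d >= 0 and doff[d] < 0:
                doff[d] = i
        return pind, pcount, doff

With these in hand, the particles of oct o are positions[pind[doff[o]:doff[o] + pcount[o]]], which is how neighbor_find later addresses them through doffs, pcounts, and pinds.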
https://bitbucket.org/yt_analysis/yt-3.0/commits/970ba2d35422/
Changeset: 970ba2d35422
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-30 02:46:08
Summary: First "SimpleNeighborSmooth" object.
Next we need a heap-based priority queue for distance and neighbor
calculations.
Affected #: 2 files
diff -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 -r 970ba2d35422d407e947b7b2a61fc88c28f98f83 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -41,6 +41,7 @@
cdef class ParticleSmoothOperation:
# We assume each will allocate and define their own temporary storage
cdef public object nvals
+ cdef int nfields
cdef void process(self, int dim[3], np.float64_t left_edge[3],
np.float64_t dds[3], np.float64_t *ppos,
np.float64_t **fields, np.int64_t nneighbors,
diff -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 -r 970ba2d35422d407e947b7b2a61fc88c28f98f83 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -34,9 +34,10 @@
OctreeContainer, OctInfo
cdef class ParticleSmoothOperation:
- def __init__(self, nvals):
+ def __init__(self, nvals, nfields):
# This is the set of cells, in grids, blocks or octs, we are handling.
self.nvals = nvals
+ self.nfields = nfields
def initialize(self, *args):
raise NotImplementedError
@@ -162,3 +163,33 @@
np.int64_t *pinds, np.int64_t *pcounts,
np.int64_t offset):
raise NotImplementedError
+
+cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
+ cdef np.float64_t **fp
+ def initialize(self):
+ if self.nvals < 2:
+ # We need at least two fields, the smoothing length and the
+ # field to smooth, to operate.
+ raise RuntimeError
+ self.vals = [np.zeros(self.nvals, dtype="float64")
+ for i in range(self.nfields)]
+ self.fp = <np.float64_t **> malloc(
+ sizeof(np.float64_t *) * self.nfields)
+
+ def finalize(self):
+ free(self.fp)
+ return self.vals
+
+ cdef void process(self, int dim[3], np.float64_t left_edge[3],
+ np.float64_t dds[3], np.float64_t *ppos,
+ np.float64_t **fields, np.int64_t nneighbors,
+ np.int64_t *nind, np.int64_t *doffs,
+ np.int64_t *pinds, np.int64_t *pcounts,
+ np.int64_t offset):
+ # Note that we assume that fields[0] == smoothing length in the native
+ # units supplied. We can now iterate over every cell in the block and
+ # every particle to find the nearest. We will use a priority heap.
+ raise NotImplementedError
+
+simple_neighbor_smooth = SimpleNeighborSmooth
+
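The commit message anticipates a heap-based priority queue for the distance and neighbor bookkeeping. As an illustration of that idea only (heapq-based; the later changesets here instead keep a sorted fixed-size list), keeping the maxn nearest particles can look like:

    import heapq

    def nearest_neighbors(candidates, maxn):
        # candidates: iterable of (particle_id, r2) pairs.
        # Keep the maxn smallest r2 with a max-heap (negated r2 in a min-heap).
        heap = []
        for pn, r2 in candidates:
            if len(heap) < maxn:
                heapq.heappush(heap, (-r2, pn))
            elif r2 < -heap[0][0]:
                heapq.heapreplace(heap, (-r2, pn))
        return sorted((-neg_r2, pn) for neg_r2, pn in heap)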
https://bitbucket.org/yt_analysis/yt-3.0/commits/916c066ad226/
Changeset: 916c066ad226
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-30 03:36:09
Summary: Simplifying API a bit and adding neighbor finding.
Affected #: 2 files
diff -r 970ba2d35422d407e947b7b2a61fc88c28f98f83 -r 916c066ad2267c40e3a222adc4b0bbc21965a709 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -38,13 +38,52 @@
cdef extern from "alloca.h":
void *alloca(int)
+cdef struct NeighborList
+cdef struct NeighborList:
+ np.int64_t pn # Particle number
+ np.float64_t r2 # radius**2
+
+cdef inline np.float64_t r2dist(np.float64_t ppos[3],
+ np.float64_t cpos[3],
+ np.float64_t DW[3]):
+ cdef int i
+ cdef np.float64_t r2, DR
+ r2 = 0.0
+ for i in range(3):
+ DR = (ppos[i] - cpos[i])
+ if (DR > DW[i]/2.0):
+ DR -= DW[i]/2.0
+ elif (DR < -DW[i]/2.0):
+ DR += DW[i]/2.0
+ r2 += DR * DR
+ return r2
+
cdef class ParticleSmoothOperation:
# We assume each will allocate and define their own temporary storage
cdef public object nvals
+ cdef np.float64_t DW[3]
cdef int nfields
- cdef void process(self, int dim[3], np.float64_t left_edge[3],
- np.float64_t dds[3], np.float64_t *ppos,
- np.float64_t **fields, np.int64_t nneighbors,
- np.int64_t *nind, np.int64_t *doffs,
- np.int64_t *pinds, np.int64_t *pcounts,
- np.int64_t offset)
+ cdef int maxn
+ cdef int curn
+ # Note that we are preallocating here, so this is *not* threadsafe.
+ cdef NeighborList *neighbors
+ cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+ np.float64_t dds[3], np.float64_t *ppos,
+ np.float64_t **fields, np.int64_t nneighbors,
+ np.int64_t *nind, np.int64_t *doffs,
+ np.int64_t *pinds, np.int64_t *pcounts,
+ np.int64_t offset)
+ cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+ np.float64_t cpos[3])
+ cdef void neighbor_reset(self)
+ cdef void neighbor_find(self,
+ np.int64_t nneighbors,
+ np.int64_t *nind,
+ np.int64_t *doffs,
+ np.int64_t *pcounts,
+ np.int64_t *pinds,
+ np.float64_t *ppos,
+ np.float64_t cpos[3])
+ cdef void process(self, np.int64_t offset, int i, int j, int k,
+ int dim[3], np.float64_t cpos[3])
+
diff -r 970ba2d35422d407e947b7b2a61fc88c28f98f83 -r 916c066ad2267c40e3a222adc4b0bbc21965a709 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -34,10 +34,15 @@
OctreeContainer, OctInfo
cdef class ParticleSmoothOperation:
- def __init__(self, nvals, nfields):
+ def __init__(self, nvals, nfields, max_neighbors):
# This is the set of cells, in grids, blocks or octs, we are handling.
+ cdef int i
self.nvals = nvals
self.nfields = nfields
+ self.maxn = max_neighbors
+ self.neighbors = <NeighborList *> malloc(
+ sizeof(NeighborList) * self.maxn)
+ self.neighbor_reset()
def initialize(self, *args):
raise NotImplementedError
@@ -94,12 +99,15 @@
doff = np.zeros_like(dom_ind) - 1
moff = octree.get_domain_offset(domain_id + domain_offset)
pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+ nf = len(fields)
if fields is None:
fields = []
field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
for i in range(nf):
tarr = fields[i]
field_pointers[i] = <np.float64_t *> tarr.data
+ for i in range(3):
+ self.DW[i] = (octree.DRE[i] - octree.DLE[i])
for i in range(positions.shape[0]):
for j in range(3):
pos[j] = positions[i, j]
@@ -143,7 +151,7 @@
nsize = nneighbors
for j in range(nneighbors):
nind[j] = neighbors[j].domain_ind - moff
- self.process(dims, oi.left_edge, oi.dds,
+ self.neighbor_process(dims, oi.left_edge, oi.dds,
ppos, field_pointers, nneighbors, nind, doffs,
pinds, pcounts, offset)
if nind != NULL:
@@ -156,14 +164,103 @@
fields = None):
raise NotImplementedError
- cdef void process(self, int dim[3], np.float64_t left_edge[3],
- np.float64_t dds[3], np.float64_t *ppos,
- np.float64_t **fields, np.int64_t nneighbors,
- np.int64_t *nind, np.int64_t *doffs,
- np.int64_t *pinds, np.int64_t *pcounts,
- np.int64_t offset):
+ cdef void process(self, np.int64_t offset, int i, int j, int k,
+ int dim[3], np.float64_t cpos[3]):
raise NotImplementedError
+ cdef void neighbor_reset(self):
+ self.curn = 0
+ for i in range(self.maxn):
+ self.neighbors[i].pn = -1
+ self.neighbors[i].r2 = 1e300
+
+ cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+ np.float64_t cpos[3]):
+ cdef NeighborList *cur
+ cdef int i
+ # _c means candidate (what we're evaluating)
+ # _o means other (the item in the list)
+ cdef np.float64_t r2_c, r2_o
+ cdef np.int64_t pn_c, pn_o
+ if self.curn < self.maxn:
+ cur = &self.neighbors[self.curn]
+ cur.pn = pn
+ cur.r2 = r2dist(ppos, cpos, self.DW)
+ self.curn += 1
+ return
+ # This will go (curn - 1) through 0.
+ r2_c = r2dist(ppos, cpos, self.DW)
+ pn_c = pn
+ for i in range((self.curn - 1), -1, -1):
+ # First we evaluate against i. If our candidate radius is greater
+ # than the one we're inspecting, we quit early.
+ cur = &self.neighbors[i]
+ r2_o = cur.r2
+ pn_o = cur.pn
+ if r2_c >= r2_o:
+ break
+ # Now we know we need to swap them. First we assign our candidate
+ # values to cur.
+ cur.r2 = r2_c
+ cur.pn = pn_c
+ if i + 1 >= self.maxn:
+ continue # No swapping
+ cur = &self.neighbors[i + 1]
+ cur.r2 = r2_o
+ cur.pn = pn_o
+ # At this point, we've evaluated all the particles and we should have a
+ # sorted set of values. So, we're done.
+
+ cdef void neighbor_find(self,
+ np.int64_t nneighbors,
+ np.int64_t *nind,
+ np.int64_t *doffs,
+ np.int64_t *pcounts,
+ np.int64_t *pinds,
+ np.float64_t *ppos,
+ np.float64_t cpos[3]
+ ):
+ # We are now given the number of neighbors, the indices into the
+ # domains for them, and the number of particles for each.
+ cdef int ni, i, j
+ cdef np.int64_t offset, pn, pc
+ cdef np.float64_t pos[3]
+ self.neighbor_reset()
+ for ni in range(nneighbors):
+ offset = doffs[nind[ni]]
+ pc = pcounts[nind[ni]]
+ for i in range(pc):
+ pn = pinds[offset + i]
+ for j in range(3):
+ pos[j] = ppos[pn * 3 + j]
+ self.neighbor_eval(pn, pos, cpos)
+
+ cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+ np.float64_t dds[3], np.float64_t *ppos,
+ np.float64_t **fields, np.int64_t nneighbors,
+ np.int64_t *nind, np.int64_t *doffs,
+ np.int64_t *pinds, np.int64_t *pcounts,
+ np.int64_t offset):
+ # Note that we assume that fields[0] == smoothing length in the native
+ # units supplied. We can now iterate over every cell in the block and
+ # every particle to find the nearest. We will use a priority heap.
+ cdef int i, j, k
+ cdef np.float64_t cpos[3]
+ cpos[0] = left_edge[0] + 0.5*dds[0]
+ for i in range(dim[0]):
+ cpos[1] = left_edge[1] + 0.5*dds[1]
+ for j in range(dim[1]):
+ cpos[2] = left_edge[2] + 0.5*dds[2]
+ for k in range(dim[2]):
+ self.neighbor_find(nneighbors, nind, doffs, pcounts,
+ pinds, ppos, cpos)
+ # Now we have all our neighbors in our neighbor list.
+ self.process(offset, i, j, k, dim, cpos)
+ cpos[2] += dds[2]
+ cpos[1] += dds[1]
+ cpos[0] += dds[0]
+
+
cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
cdef np.float64_t **fp
def initialize(self):
@@ -180,15 +277,9 @@
free(self.fp)
return self.vals
- cdef void process(self, int dim[3], np.float64_t left_edge[3],
- np.float64_t dds[3], np.float64_t *ppos,
- np.float64_t **fields, np.int64_t nneighbors,
- np.int64_t *nind, np.int64_t *doffs,
- np.int64_t *pinds, np.int64_t *pcounts,
- np.int64_t offset):
- # Note that we assume that fields[0] == smoothing length in the native
- # units supplied. We can now iterate over every cell in the block and
- # every particle to find the nearest. We will use a priority heap.
+ cdef void process(self, np.int64_t offset, int i, int j, int k,
+ int dim[3], np.float64_t cpos[3]):
+ # We have our i, j, k for our cell
raise NotImplementedError
simple_neighbor_smooth = SimpleNeighborSmooth
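neighbor_eval above keeps a fixed-size, distance-ordered neighbor list: candidates are collected until the list is full, after which each new candidate bubbles in from the far end, displacing larger entries. A small Python sketch mirroring that insertion step, with list entries as (r2, pn) tuples kept sorted ascending:

    def neighbor_eval(neighbors, maxn, pn, r2):
        # Fill phase: just collect, keeping the list sorted by r2.
        if len(neighbors) < maxn:
            neighbors.append((r2, pn))
            neighbors.sort()
            return
        # Insertion phase: walk from the far (largest-r2) end, shifting entries
        # right until the candidate's r2 is in order; the largest entry drops off.
        for i in range(maxn - 1, -1, -1):
            if r2 >= neighbors[i][0]:
                break
            displaced = neighbors[i]
            neighbors[i] = (r2, pn)
            if i + 1 < maxn:
                neighbors[i + 1] = displaced

The sort during the fill phase is a simplification; the Cython version adds a qsort once the list first fills, in a later changeset in this series.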
https://bitbucket.org/yt_analysis/yt-3.0/commits/de63479c8fb3/
Changeset: de63479c8fb3
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-30 06:39:37
Summary: Implement first pass at .smooth() for Octree.
Affected #: 5 files
diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,6 +36,7 @@
NeedsProperty, \
NeedsParameter
import yt.geometry.particle_deposit as particle_deposit
+import yt.geometry.particle_smooth as particle_smooth
from yt.funcs import *
class OctreeSubset(YTSelectionContainer):
@@ -124,6 +125,23 @@
if vals is None: return
return np.asfortranarray(vals)
+ def smooth(self, positions, fields = None, method = None):
+ # Here we perform our particle deposition.
+ cls = getattr(particle_smooth, "%s_smooth" % method, None)
+ if cls is None:
+ raise YTParticleDepositionNotImplemented(method)
+ nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
+ if fields is None: fields = []
+ op = cls(nvals, len(fields), 64)
+ op.initialize()
+ mylog.debug("Smoothing %s particles into %s Octs",
+ positions.shape[0], nvals[-1])
+ op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+ self.domain_id, self._domain_offset)
+ vals = op.finalize()
+ if vals is None: return
+ return np.asfortranarray(vals)
+
def select_icoords(self, dobj):
d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -72,7 +72,7 @@
cdef public int max_domain
cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
cdef int get_root(self, int ind[3], Oct **o)
- cdef int neighbors(self, OctInfo *oinfo, Oct **neighbors)
+ cdef int neighbors(self, OctInfo *oinfo, Oct ***neighbors)
cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
# This function must return the offset from global-to-local domains; i.e.,
# OctAllocationContainer.offset if such a thing exists.
diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -263,7 +263,7 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef int neighbors(self, OctInfo *oi, Oct** neighbors):
+ cdef int neighbors(self, OctInfo *oi, Oct*** neighbors):
cdef Oct* candidate
nn = 0
# We are going to do a brute-force search here.
@@ -317,9 +317,9 @@
if my_list == NULL: my_list = olist
olist = my_list
cdef int noct = OctList_count(olist)
- neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
+ neighbors[0] = <Oct **> malloc(sizeof(Oct*)*noct)
for i in range(noct):
- neighbors[i] = olist.o
+ neighbors[0][i] = olist.o
olist = olist.next
OctList_delete(my_list)
return noct
diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -65,6 +65,10 @@
cdef int nfields
cdef int maxn
cdef int curn
+ cdef np.int64_t *doffs
+ cdef np.int64_t *pinds
+ cdef np.int64_t *pcounts
+ cdef np.float64_t *ppos
# Note that we are preallocating here, so this is *not* threadsafe.
cdef NeighborList *neighbors
cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -40,6 +40,7 @@
self.nvals = nvals
self.nfields = nfields
self.maxn = max_neighbors
+ print "CREATED", nvals, nfields, max_neighbors
self.neighbors = <NeighborList *> malloc(
sizeof(NeighborList) * self.maxn)
self.neighbor_reset()
@@ -128,8 +129,7 @@
for i in range(positions.shape[0]):
# This is the domain_ind (minus moff) for this particle
offset = pdoms[pind[i]]
- if doff[offset] < 0:
- doff[offset] = i
+ if doff[offset] < 0: doff[offset] = i
# Now doff is full of offsets to the first entry in the pind that
# refers to that oct's particles.
ppos = <np.float64_t *> positions.data
@@ -143,7 +143,7 @@
if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
continue
offset = dom_ind[oct.domain_ind - moff] * 8
- nneighbors = octree.neighbors(&oi, neighbors)
+ nneighbors = octree.neighbors(&oi, &neighbors)
# Now we have all our neighbors. And, we should be set for what
# else we need to do.
if nneighbors > nsize:
@@ -154,6 +154,9 @@
self.neighbor_process(dims, oi.left_edge, oi.dds,
ppos, field_pointers, nneighbors, nind, doffs,
pinds, pcounts, offset)
+ # This is allocated by the neighbors function, so we deallocate it.
+ free(neighbors)
+ neighbors = NULL
if nind != NULL:
free(nind)
@@ -263,6 +266,7 @@
cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
cdef np.float64_t **fp
+ cdef public object vals
def initialize(self):
if self.nvals < 2:
# We need at least two fields, the smoothing length and the
@@ -280,7 +284,7 @@
cdef void process(self, np.int64_t offset, int i, int j, int k,
int dim[3], np.float64_t cpos[3]):
# We have our i, j, k for our cell
- raise NotImplementedError
+ #print "Offset", offset, i, j, k, self.curn
+ return
simple_neighbor_smooth = SimpleNeighborSmooth
-
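The new OctreeSubset.smooth() mirrors deposit(): it looks up a "<method>_smooth" class in particle_smooth, constructs it with the output shape, the number of fields, and a maximum neighbor count (64 here), runs process_octree, and returns the finalized arrays in Fortran order. A purely illustrative call with placeholder arrays (per the initialize() comment, at minimum a smoothing length and a field to smooth are expected):

    # Illustrative only: `subset` is an OctreeSubset; the field arrays are
    # placeholders for per-particle data.
    vals = subset.smooth(positions,
                         fields=[smoothing_length, field_to_smooth],
                         method="simple_neighbor")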
https://bitbucket.org/yt_analysis/yt-3.0/commits/187c82b93c73/
Changeset: 187c82b93c73
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-30 06:51:59
Summary: If domain offset < 0, no particles.
Affected #: 1 file
diff -r de63479c8fb3f514ce9fd127b584192390bd2a87 -r 187c82b93c733aa697f94474667f345657d4cac9 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -51,6 +51,7 @@
def finalize(self, *args):
raise NotImplementedError
+ @cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)
def process_octree(self, OctreeContainer octree,
@@ -136,7 +137,10 @@
doffs = <np.int64_t*> doff.data
pinds = <np.int64_t*> pind.data
pcounts = <np.int64_t*> pcount.data
+ cdef np.int64_t pn
for i in range(doff.shape[0]):
+ # Nothing assigned.
+ if doff[i] < 0: continue
for j in range(3):
pos[j] = positions[pind[doff[i]], j]
oct = octree.get(pos, &oi)
@@ -160,6 +164,7 @@
if nind != NULL:
free(nind)
+ @cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)
def process_grid(self, gobj,
https://bitbucket.org/yt_analysis/yt-3.0/commits/781176f368f9/
Changeset: 781176f368f9
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-30 17:22:59
Summary: It makes more sense to return a neighbors** than to fill it. Also fix segfault
from undeclared nsize.
Affected #: 3 files
diff -r 187c82b93c733aa697f94474667f345657d4cac9 -r 781176f368f9c2cdf164576e7aa345b7cc266b6e yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -72,7 +72,7 @@
cdef public int max_domain
cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
cdef int get_root(self, int ind[3], Oct **o)
- cdef int neighbors(self, OctInfo *oinfo, Oct ***neighbors)
+ cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors)
cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
# This function must return the offset from global-to-local domains; i.e.,
# OctAllocationContainer.offset if such a thing exists.
diff -r 187c82b93c733aa697f94474667f345657d4cac9 -r 781176f368f9c2cdf164576e7aa345b7cc266b6e yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -263,7 +263,7 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef int neighbors(self, OctInfo *oi, Oct*** neighbors):
+ cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors):
cdef Oct* candidate
nn = 0
# We are going to do a brute-force search here.
@@ -317,12 +317,14 @@
if my_list == NULL: my_list = olist
olist = my_list
cdef int noct = OctList_count(olist)
- neighbors[0] = <Oct **> malloc(sizeof(Oct*)*noct)
+ cdef Oct **neighbors
+ neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
for i in range(noct):
- neighbors[0][i] = olist.o
+ neighbors[i] = olist.o
olist = olist.next
OctList_delete(my_list)
- return noct
+ nneighbors[0] = noct
+ return neighbors
@cython.boundscheck(False)
@cython.wraparound(False)
@@ -770,7 +772,7 @@
cdef OctList *OctList_append(OctList *olist, Oct *o):
cdef OctList *this = olist
- if olist == NULL:
+ if this == NULL:
this = <OctList *> malloc(sizeof(OctList))
this.next = NULL
this.o = o
diff -r 187c82b93c733aa697f94474667f345657d4cac9 -r 781176f368f9c2cdf164576e7aa345b7cc266b6e yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -138,6 +138,8 @@
pinds = <np.int64_t*> pind.data
pcounts = <np.int64_t*> pcount.data
cdef np.int64_t pn
+ nsize = 27
+ nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
for i in range(doff.shape[0]):
# Nothing assigned.
if doff[i] < 0: continue
@@ -147,20 +149,20 @@
if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
continue
offset = dom_ind[oct.domain_ind - moff] * 8
- nneighbors = octree.neighbors(&oi, &neighbors)
+ neighbors = octree.neighbors(&oi, &nneighbors)
# Now we have all our neighbors. And, we should be set for what
# else we need to do.
if nneighbors > nsize:
- nind = <np.int64_t *> realloc(nind, nneighbors)
+ nind = <np.int64_t *> realloc(
+ nind, sizeof(np.int64_t)*nneighbors)
nsize = nneighbors
for j in range(nneighbors):
nind[j] = neighbors[j].domain_ind - moff
+ free(neighbors)
self.neighbor_process(dims, oi.left_edge, oi.dds,
ppos, field_pointers, nneighbors, nind, doffs,
pinds, pcounts, offset)
# This is allocated by the neighbors function, so we deallocate it.
- free(neighbors)
- neighbors = NULL
if nind != NULL:
free(nind)
https://bitbucket.org/yt_analysis/yt-3.0/commits/eaa322474306/
Changeset: eaa322474306
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-30 17:38:20
Summary: Removed print statement.
Affected #: 1 file
diff -r 781176f368f9c2cdf164576e7aa345b7cc266b6e -r eaa32247430624efb2a3f32c99447c6d9f576117 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -40,7 +40,6 @@
self.nvals = nvals
self.nfields = nfields
self.maxn = max_neighbors
- print "CREATED", nvals, nfields, max_neighbors
self.neighbors = <NeighborList *> malloc(
sizeof(NeighborList) * self.maxn)
self.neighbor_reset()
https://bitbucket.org/yt_analysis/yt-3.0/commits/c2a86529276e/
Changeset: c2a86529276e
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-31 00:49:33
Summary: Continuing simple neighbor smoothing implementation.
Currently gives something of garbage results. They look vaguely like data ...
but not like the right data.
Affected #: 3 files
diff -r eaa32247430624efb2a3f32c99447c6d9f576117 -r c2a86529276e5f2accddb40027deb47da07a8dea yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -140,7 +140,11 @@
self.domain_id, self._domain_offset)
vals = op.finalize()
if vals is None: return
- return np.asfortranarray(vals)
+ if isinstance(vals, list):
+ vals = [np.asfortranarray(v) for v in vals]
+ else:
+ vals = np.asfortranarray(vals)
+ return vals
def select_icoords(self, dobj):
d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
diff -r eaa32247430624efb2a3f32c99447c6d9f576117 -r c2a86529276e5f2accddb40027deb47da07a8dea yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -89,5 +89,4 @@
np.float64_t *ppos,
np.float64_t cpos[3])
cdef void process(self, np.int64_t offset, int i, int j, int k,
- int dim[3], np.float64_t cpos[3])
-
+ int dim[3], np.float64_t cpos[3], np.float64_t **fields)
diff -r eaa32247430624efb2a3f32c99447c6d9f576117 -r c2a86529276e5f2accddb40027deb47da07a8dea yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -174,7 +174,7 @@
raise NotImplementedError
cdef void process(self, np.int64_t offset, int i, int j, int k,
- int dim[3], np.float64_t cpos[3]):
+ int dim[3], np.float64_t cpos[3], np.float64_t **fields):
raise NotImplementedError
cdef void neighbor_reset(self):
@@ -264,7 +264,7 @@
self.neighbor_find(nneighbors, nind, doffs, pcounts,
pinds, ppos, cpos)
# Now we have all our neighbors in our neighbor list.
- self.process(offset, i, j, k, dim, cpos)
+ self.process(offset, i, j, k, dim, cpos, fields)
cpos[2] += dds[2]
cpos[1] += dds[1]
cpos[0] += dds[0]
@@ -274,23 +274,42 @@
cdef np.float64_t **fp
cdef public object vals
def initialize(self):
- if self.nvals < 2:
+ cdef int i
+ if self.nfields < 4:
# We need at least two fields, the smoothing length and the
# field to smooth, to operate.
raise RuntimeError
- self.vals = [np.zeros(self.nvals, dtype="float64")
- for i in range(self.nfields)]
+ cdef np.ndarray tarr
self.fp = <np.float64_t **> malloc(
sizeof(np.float64_t *) * self.nfields)
+ self.vals = []
+ for i in range(self.nfields):
+ tarr = np.zeros(self.nvals, dtype="float64", order="F")
+ self.vals.append(tarr)
+ self.fp[i] = <np.float64_t *> tarr.data
def finalize(self):
free(self.fp)
return self.vals
cdef void process(self, np.int64_t offset, int i, int j, int k,
- int dim[3], np.float64_t cpos[3]):
- # We have our i, j, k for our cell
- #print "Offset", offset, i, j, k, self.curn
+ int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+ # We have our i, j, k for our cell, as well as the cell position.
+ # We also have a list of neighboring particles with particle numbers.
+ cdef int n, fi
+ cdef np.float64_t weight, r2, val
+ cdef np.int64_t pn
+ for n in range(self.curn):
+ # No normalization for the moment.
+ # fields[0] is the smoothing length.
+ r2 = self.neighbors[n].r2
+ pn = self.neighbors[n].pn
+ # Smoothing kernel weight function
+ weight = sph_kernel(sqrt(r2) / fields[0][pn])
+ # Mass of the particle times the value divided by the Density
+ for fi in range(self.nfields - 3):
+ val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
+ self.fp[fi + 3][gind(i,j,k,dim) + offset] = val
return
simple_neighbor_smooth = SimpleNeighborSmooth
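The process() body is headed toward an SPH-style gather: each neighbor particle contributes mass * field / density, weighted by a kernel of (distance / smoothing length). A rough sketch of that gather written as a weighted sum (the in-progress code above assigns per-neighbor values rather than accumulating, and the commit message itself notes the results are not yet right); `kernel` stands in for the sph_kernel imported from particle_deposit:

    def gather(neighbor_list, hsml, mass, dens, field, kernel):
        # neighbor_list: (r2, pn) pairs for the current cell.
        # hsml, mass, dens, field: per-particle arrays indexed by pn.
        total = 0.0
        for r2, pn in neighbor_list:
            w = kernel(r2 ** 0.5 / hsml[pn])
            total += w * mass[pn] * field[pn] / dens[pn]
        return total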
https://bitbucket.org/yt_analysis/yt-3.0/commits/af666312fecd/
Changeset: af666312fecd
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-31 00:58:20
Summary: Add a quick check that we don't double-process an oct and its neighbors.
Affected #: 1 file
diff -r c2a86529276e5f2accddb40027deb47da07a8dea -r af666312fecdf5fee9752b3f8447cf64e282744c yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -84,7 +84,7 @@
# mechanism of an expandable array for holding pointers to Octs, so
# that we can deal with >27 neighbors. As I write this comment,
# neighbors() only returns 27 neighbors.
- cdef int nf, i, j, dims[3]
+ cdef int nf, i, j, dims[3], n
cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos
cdef int nsize = 0
cdef np.int64_t *nind = NULL
@@ -157,6 +157,10 @@
nsize = nneighbors
for j in range(nneighbors):
nind[j] = neighbors[j].domain_ind - moff
+ for n in range(j):
+ if nind[j] == nind[n]:
+ nind[j] = -1
+ break
free(neighbors)
self.neighbor_process(dims, oi.left_edge, oi.dds,
ppos, field_pointers, nneighbors, nind, doffs,
@@ -236,6 +240,7 @@
cdef np.float64_t pos[3]
self.neighbor_reset()
for ni in range(nneighbors):
+ if nind[ni] == -1: continue
offset = doffs[nind[ni]]
pc = pcounts[nind[ni]]
for i in range(pc):
https://bitbucket.org/yt_analysis/yt-3.0/commits/fedd682f5272/
Changeset: fedd682f5272
Branch: yt-3.0
User: MatthewTurk
Date: 2013-07-31 03:34:31
Summary: Adding smarter sorting, fixing neighbor finding.
This fixes the problem of oddness at higher levels. I verified this by
inspecting whether or not (when nneighbors == 27) the 13th item is the oct
itself.
Affected #: 3 files
diff -r af666312fecdf5fee9752b3f8447cf64e282744c -r fedd682f5272d3d6813b5ef8152a5a7a13b6a68d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -243,7 +243,7 @@
oinfo.dds[i] = dds[i] # Cell width
oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
oinfo.ipos[i] = ipos[i]
- oinfo.level = level
+ oinfo.level = level
return cur
def domain_identify(self, SelectorObject selector):
@@ -280,25 +280,23 @@
# Now we get our boundaries for this level, so that we can wrap around
# if need be.
for i in range(3):
- ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i])/oi.dds[i])
+ ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i])/(2*oi.dds[i]))
for i in range(3):
npos[0] = (oi.ipos[0] + (1 - i))
if npos[0] < 0: npos[0] += ndim[0]
if npos[0] >= ndim[0]: npos[0] -= ndim[0]
for j in range(3):
- nj = 1 - j
npos[1] = (oi.ipos[1] + (1 - j))
if npos[1] < 0: npos[1] += ndim[1]
if npos[1] >= ndim[1]: npos[1] -= ndim[1]
for k in range(3):
- nk = 1 - k
npos[2] = (oi.ipos[2] + (1 - k))
if npos[2] < 0: npos[2] += ndim[2]
if npos[2] >= ndim[2]: npos[2] -= ndim[2]
# Now we have our npos, which we just need to find.
# Level 0 gets bootstrapped
for n in range(3):
- ind[n] = ((npos[n] >> (oi.level + 1)) & 1)
+ ind[n] = ((npos[n] >> (oi.level)) & 1)
cand = NULL
self.get_root(ind, &cand)
# We should not get a NULL if we handle periodicity
@@ -307,7 +305,7 @@
for level in range(1, oi.level+1):
if cand.children == NULL: break
for n in range(3):
- ind[n] = (npos[n] >> (oi.level - (level + 1))) & 1
+ ind[n] = (npos[n] >> (oi.level - (level))) & 1
ii = cind(ind[0],ind[1],ind[2])
if cand.children[ii] == NULL: break
cand = cand.children[ii]
diff -r af666312fecdf5fee9752b3f8447cf64e282744c -r fedd682f5272d3d6813b5ef8152a5a7a13b6a68d yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -27,7 +27,7 @@
cimport numpy as np
import numpy as np
-from libc.stdlib cimport malloc, free
+from libc.stdlib cimport malloc, free, qsort
cimport cython
from libc.math cimport sqrt
diff -r af666312fecdf5fee9752b3f8447cf64e282744c -r fedd682f5272d3d6813b5ef8152a5a7a13b6a68d yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -33,6 +33,19 @@
from oct_container cimport Oct, OctAllocationContainer, \
OctreeContainer, OctInfo
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+ cdef NeighborList *n1, *n2
+ n1 = <NeighborList *> on1
+ n2 = <NeighborList *> on2
+ # Note that we set this up so that "greatest" evaluates to the *end* of the
+ # list, so we can do standard radius comparisons.
+ if n1.r2 < n2.r2:
+ return -1
+ elif n1.r2 == n2.r2:
+ return 0
+ else:
+ return 1
+
cdef class ParticleSmoothOperation:
def __init__(self, nvals, nfields, max_neighbors):
# This is the set of cells, in grids, blocks or octs, we are handling.
@@ -91,14 +104,18 @@
cdef OctInfo oi
cdef Oct *oct, **neighbors = NULL
cdef np.int64_t nneighbors, numpart, offset, moff, local_ind
- cdef np.int64_t *doffs, *pinds, *pcounts
+ cdef np.int64_t *doffs, *pinds, *pcounts, poff
cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
cdef np.ndarray[np.float64_t, ndim=1] tarr
dims[0] = dims[1] = dims[2] = 2
numpart = positions.shape[0]
+ # pcount is the number of particles per oct.
pcount = np.zeros_like(dom_ind)
+ # doff is the offset to a given oct in the sorted particles.
doff = np.zeros_like(dom_ind) - 1
moff = octree.get_domain_offset(domain_id + domain_offset)
+ # pdoms points particles at their octs. So the value in this array, for
+ # a given index, is the local oct index.
pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
nf = len(fields)
if fields is None:
@@ -118,17 +135,24 @@
# Note that this has to be our local index, not our in-file index.
# This is the particle count, which we'll use once we have sorted
# the particles to calculate the offsets into each oct's particles.
- pcount[oct.domain_ind - moff] += 1
- pdoms[i] = oct.domain_ind - moff # We store the *actual* offset.
+ offset = oct.domain_ind - moff
+ pcount[offset] += 1
+ pdoms[i] = offset # We store the *actual* offset.
# Now we have oct assignments. Let's sort them.
# Note that what we will be providing to our processing functions will
# actually be indirectly-sorted fields. This preserves memory at the
# expense of additional pointer lookups.
pind = np.argsort(pdoms)
+ # So what this means is that we now have all the oct-0 particle indices
+ # in order, then the oct-1, etc etc.
# This now gives us the indices to the particles for each domain.
for i in range(positions.shape[0]):
- # This is the domain_ind (minus moff) for this particle
- offset = pdoms[pind[i]]
+ # This value, poff, is the index of the particle in the *unsorted*
+ # arrays.
+ poff = pind[i]
+ offset = pdoms[poff]
+ # If we have yet to assign the starting index to this oct, we do so
+ # now.
if doff[offset] < 0: doff[offset] = i
# Now doff is full of offsets to the first entry in the pind that
# refers to that oct's particles.
@@ -136,14 +160,16 @@
doffs = <np.int64_t*> doff.data
pinds = <np.int64_t*> pind.data
pcounts = <np.int64_t*> pcount.data
- cdef np.int64_t pn
nsize = 27
nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
for i in range(doff.shape[0]):
# Nothing assigned.
if doff[i] < 0: continue
+ # The first particle assigned to this oct should be the one we
+ # want.
+ poff = pind[doff[i]]
for j in range(3):
- pos[j] = positions[pind[doff[i]], j]
+ pos[j] = positions[poff, j]
oct = octree.get(pos, &oi)
if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
continue
@@ -161,11 +187,11 @@
if nind[j] == nind[n]:
nind[j] = -1
break
+ # This is allocated by the neighbors function, so we deallocate it.
free(neighbors)
self.neighbor_process(dims, oi.left_edge, oi.dds,
ppos, field_pointers, nneighbors, nind, doffs,
pinds, pcounts, offset)
- # This is allocated by the neighbors function, so we deallocate it.
if nind != NULL:
free(nind)
@@ -195,18 +221,26 @@
# _o means other (the item in the list)
cdef np.float64_t r2_c, r2_o
cdef np.int64_t pn_c, pn_o
+ # If we're less than the maximum number of neighbors, we simply append.
+ # After that, we will sort, and then only compare against the rightmost
+ # entries.
if self.curn < self.maxn:
cur = &self.neighbors[self.curn]
cur.pn = pn
cur.r2 = r2dist(ppos, cpos, self.DW)
self.curn += 1
+ if self.curn == self.maxn:
+ # This time we sort it, so that future insertions will be able
+ # to be done in order.
+ qsort(self.neighbors, self.curn, sizeof(NeighborList),
+ Neighbor_compare)
return
# This will go (curn - 1) through 0.
r2_c = r2dist(ppos, cpos, self.DW)
pn_c = pn
for i in range((self.curn - 1), -1, -1):
# First we evaluate against i. If our candidate radius is greater
- # than the one we're inspecting, we quit early.
+ # than the one we're inspecting, we quit.
cur = &self.neighbors[i]
r2_o = cur.r2
pn_o = cur.pn
@@ -314,7 +348,7 @@
# Mass of the particle times the value divided by the Density
for fi in range(self.nfields - 3):
val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
- self.fp[fi + 3][gind(i,j,k,dim) + offset] = val
+ self.fp[fi + 3][gind(i,j,k,dim) + offset] = val * weight
return
simple_neighbor_smooth = SimpleNeighborSmooth
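The comments added above describe an indirect sort: each particle is assigned its local oct index, np.argsort over those assignments yields the grouped ordering, and a per-oct offset table records where each oct's particles begin. A minimal NumPy sketch of that bookkeeping, using a hypothetical helper (build_oct_offsets) rather than the Cython code itself:

    import numpy as np

    def build_oct_offsets(pdoms, n_octs):
        # pdoms[i] is the local oct index assigned to particle i (-1 if unassigned).
        pcount = np.zeros(n_octs, dtype="int64")      # particles per oct
        doff = np.zeros(n_octs, dtype="int64") - 1    # first entry in pind for each oct
        for d in pdoms:
            if d >= 0:
                pcount[d] += 1
        # Indirect sort: pind[k] is the index (into the unsorted arrays) of the
        # k-th particle once particles are grouped by oct.
        pind = np.argsort(pdoms)
        for i, poff in enumerate(pind):
            offset = pdoms[poff]
            if offset >= 0 and doff[offset] < 0:
                doff[offset] = i
        return pind, doff, pcount

    # Particles belonging to oct j are positions[pind[doff[j]:doff[j] + pcount[j]]].
    pdoms = np.array([2, 0, 2, 1, 0], dtype="int64")
    pind, doff, pcount = build_oct_offsets(pdoms, 3)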
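The NeighborList comments above also spell out a bounded nearest-neighbor policy: append candidates until maxn entries exist, sort once by squared distance, and from then on compare new candidates against the sorted tail. A rough pure-Python sketch of that policy (the class name and the bisect-based insertion are illustrative; the Cython version walks the array from the rightmost entry inward):

    import bisect

    class BoundedNeighborList:
        """Keep the maxn nearest candidates seen so far, sorted by squared distance."""
        def __init__(self, maxn):
            self.maxn = maxn
            self.r2 = []   # squared distances, kept sorted once the list is full
            self.pn = []   # particle indices, parallel to r2

        def insert(self, pn, r2):
            if len(self.r2) < self.maxn:
                self.r2.append(r2)
                self.pn.append(pn)
                if len(self.r2) == self.maxn:
                    # Sort once; later insertions can assume the list is ordered.
                    order = sorted(range(self.maxn), key=self.r2.__getitem__)
                    self.r2 = [self.r2[i] for i in order]
                    self.pn = [self.pn[i] for i in order]
                return
            if r2 >= self.r2[-1]:
                return  # farther than the current worst neighbor; nothing to do
            # Insert in order and drop the farthest entry.
            i = bisect.bisect_left(self.r2, r2)
            self.r2.insert(i, r2)
            self.pn.insert(i, pn)
            self.r2.pop()
            self.pn.pop()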
https://bitbucket.org/yt_analysis/yt-3.0/commits/be5daa03f219/
Changeset: be5daa03f219
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-13 16:08:26
Summary: If passed None, assume we have no periodicity.
Affected #: 1 file
diff -r fedd682f5272d3d6813b5ef8152a5a7a13b6a68d -r be5daa03f219683784d5e415beb3b626ba18fb59 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -121,15 +121,19 @@
self.min_level = getattr(dobj, "min_level", 0)
self.max_level = getattr(dobj, "max_level", 99)
self.overlap_cells = 0
+ if dobj is None:
+ for i in range(3):
+ self.periodicity[i] = False
+ self.domain_width[i] = 0.0
+ else:
+ for i in range(3) :
+ if dobj.pf.periodicity[i] and dobj.pf.domain_left_edge[i] != 0.0 :
+ print "SelectorObject periodicity assumes left_edge == 0"
+ raise RuntimeError
- for i in range(3) :
- if dobj.pf.periodicity[i] and dobj.pf.domain_left_edge[i] != 0.0 :
- print "SelectorObject periodicity assumes left_edge == 0"
- raise RuntimeError
-
- self.domain_width[i] = dobj.pf.domain_right_edge[i] - \
- dobj.pf.domain_left_edge[i]
- self.periodicity[i] = dobj.pf.periodicity[i]
+ self.domain_width[i] = dobj.pf.domain_right_edge[i] - \
+ dobj.pf.domain_left_edge[i]
+ self.periodicity[i] = dobj.pf.periodicity[i]
@cython.boundscheck(False)
@cython.wraparound(False)
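The selector change above stores a per-axis domain_width and periodicity flag, and requires domain_left_edge == 0 when an axis is periodic. Fields like these are conventionally used for minimum-image distances, where a separation larger than half the box wraps around; the sketch below shows that convention only, and is not necessarily the exact body of the library's r2dist helper:

    def periodic_r2(ppos, cpos, domain_width, periodicity):
        """Squared distance between two points, wrapping periodic axes.

        Assumes the domain starts at 0 on each axis, matching the
        left_edge == 0 requirement enforced by the selector above."""
        r2 = 0.0
        for i in range(3):
            d = ppos[i] - cpos[i]
            if periodicity[i]:
                # Minimum-image convention: never separate by more than half a box.
                if d > 0.5 * domain_width[i]:
                    d -= domain_width[i]
                elif d < -0.5 * domain_width[i]:
                    d += domain_width[i]
            r2 += d * d
        return r2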
https://bitbucket.org/yt_analysis/yt-3.0/commits/d02eca0d4b95/
Changeset: d02eca0d4b95
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-21 21:30:15
Summary: Removing unused functions and adding TODO notes.
Affected #: 4 files
diff -r be5daa03f219683784d5e415beb3b626ba18fb59 -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -332,6 +332,7 @@
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
cdef np.ndarray[np.int64_t, ndim=2] coords
+ # TODO: This *8 needs to be made generic.
coords = np.empty((num_octs * 8, 3), dtype="int64")
cdef OctVisitorData data
data.array = <void *> coords.data
@@ -349,6 +350,7 @@
num_octs = selector.count_octs(self, domain_id)
#Return the 'resolution' of each cell; ie the level
cdef np.ndarray[np.int64_t, ndim=1] res
+ # TODO: This *8 needs to be made generic.
res = np.empty(num_octs * 8, dtype="int64")
cdef OctVisitorData data
data.array = <void *> res.data
@@ -365,6 +367,7 @@
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
cdef np.ndarray[np.float64_t, ndim=2] fwidth
+ # TODO: This *8 needs to be made generic.
fwidth = np.empty((num_octs * 8, 3), dtype="float64")
cdef OctVisitorData data
data.array = <void *> fwidth.data
@@ -386,6 +389,7 @@
num_octs = selector.count_octs(self, domain_id)
#Return the floating point unitary position of every cell
cdef np.ndarray[np.float64_t, ndim=2] coords
+ # TODO: This *8 needs to be made generic.
coords = np.empty((num_octs * 8, 3), dtype="float64")
cdef OctVisitorData data
data.array = <void *> coords.data
@@ -438,8 +442,10 @@
else:
raise NotImplementedError
self.visit_all_octs(selector, func, &data)
+ # TODO: This *8 needs to be made generic.
if (data.global_index + 1) * 8 * data.dims > source.size:
print "GLOBAL INDEX RAN AHEAD.",
+ # TODO: This *8 needs to be made generic.
print (data.global_index + 1) * 8 * data.dims - source.size
print dest.size, source.size, num_cells
raise RuntimeError
@@ -542,6 +548,7 @@
if parent.children != NULL:
next = parent.children[cind(ind[0],ind[1],ind[2])]
else:
+ # TODO: This *8 does NOT need to be made generic.
parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
for i in range(8):
parent.children[i] = NULL
@@ -608,6 +615,7 @@
data.index = 0
data.domain = 1
self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
+ # TODO: This *8 needs to be made generic.
assert ((data.global_index+1)*8 == data.index)
cdef int root_node_compare(void *a, void *b) nogil:
diff -r be5daa03f219683784d5e415beb3b626ba18fb59 -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -49,8 +49,6 @@
cdef oct_visitor_function count_total_octs
cdef oct_visitor_function count_total_cells
-cdef oct_visitor_function mark_octs
-cdef oct_visitor_function mask_octs
cdef oct_visitor_function index_octs
cdef oct_visitor_function icoords_octs
cdef oct_visitor_function ires_octs
diff -r be5daa03f219683784d5e415beb3b626ba18fb59 -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,6 +38,7 @@
if selected == 0: return
cdef int i
# There are this many records between "octs"
+ # TODO: This 8 needs to be made into a generic value.
cdef np.int64_t index = (data.global_index * 8)*data.dims
cdef np.float64_t **p = <np.float64_t**> data.array
index += oind(data)*data.dims
@@ -50,6 +51,7 @@
# "last" here tells us the dimensionality of the array.
if selected == 0: return
cdef int i
+ # TODO: This 8 needs to be made into a generic value.
cdef np.int64_t index = (data.global_index * 8)*data.dims
cdef np.int64_t **p = <np.int64_t**> data.array
index += oind(data)*data.dims
@@ -68,25 +70,6 @@
# Number of *cells* visited and selected.
data.index += selected
-cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
- # We mark them even if they are not selected
- cdef int i
- cdef np.uint8_t *arr = <np.uint8_t *> data.array
- if data.last != o.domain_ind:
- data.last = o.domain_ind
- data.index += 1
- cdef np.int64_t index = data.index * 8
- index += oind(data)
- arr[index] = 1
-
-cdef void mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
- if selected == 0: return
- cdef int i
- cdef np.uint8_t *arr = <np.uint8_t *> data.array
- cdef np.int64_t index = data.global_index * 8
- index += oind(data)
- arr[index] = 1
-
cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
# Note that we provide an index even if the cell is not selected.
cdef int i
@@ -101,6 +84,8 @@
if selected == 0: return
cdef np.int64_t *coords = <np.int64_t*> data.array
cdef int i
+ # TODO: data.ind and the number of bits we shift need to be made general
+ # for octrees with > 8 zones.
for i in range(3):
coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
data.index += 1
@@ -120,6 +105,8 @@
cdef np.float64_t *fcoords = <np.float64_t*> data.array
cdef int i
cdef np.float64_t c, dx
+ # TODO: data.ind and the number of bits we shift in dx and in data.pos need
+ # to be made general for octrees with > 8 zones.
dx = 1.0 / (2 << data.level)
for i in range(3):
c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i])
diff -r be5daa03f219683784d5e415beb3b626ba18fb59 -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -231,6 +231,10 @@
if root.children != NULL:
ch = root.children[cind(i,j,k)]
if iter == 1 and next_level == 1 and ch != NULL:
+ # Note that data.pos is always going to be the
+ # position of the Oct -- it is *not* always going
+ # to be the same as the position of the cell under
+ # investigation.
data.pos[0] = (data.pos[0] << 1) + i
data.pos[1] = (data.pos[1] << 1) + j
data.pos[2] = (data.pos[2] << 1) + k
@@ -243,11 +247,15 @@
data.pos[2] = (data.pos[2] >> 1)
data.level -= 1
elif this_level == 1:
+ # TODO: Refactor to enable multiple cells
+ # This code should be able to iterate over
+ # cells, even though the rest cannot.
selected = self.select_cell(spos, sdds)
if ch != NULL:
selected *= self.overlap_cells
data.global_index += increment
increment = 0
+ # data.ind refers to the cell, not to the oct.
data.ind[0] = i
data.ind[1] = j
data.ind[2] = k
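The visitor hunks above turn an oct's integer position plus a cell index into cell coordinates: descending one level shifts the integer position up by one bit, and with two zones per oct per dimension the cell width on a unit domain is 1.0 / (2 << level), which is the hard-coded factor the TODO notes want generalized. A small sketch of that arithmetic, assuming a unit domain and cell-centered fcoords:

    def cell_icoords(oct_pos, cell_ind):
        # Integer coordinates of a cell: the oct position shifted up one bit,
        # plus the 0/1 cell index along each axis.
        return tuple((oct_pos[i] << 1) + cell_ind[i] for i in range(3))

    def cell_fcoords(oct_pos, cell_ind, level):
        # Cell width on a unit domain; 2 << level doubles per refinement level
        # because each oct is two cells across per dimension.
        dx = 1.0 / (2 << level)
        return tuple(((oct_pos[i] << 1) + cell_ind[i] + 0.5) * dx for i in range(3))

    # Example: the (1, 0, 1) cell of the root-level oct at integer position (0, 0, 0).
    print(cell_icoords((0, 0, 0), (1, 0, 1)))      # (1, 0, 1)
    print(cell_fcoords((0, 0, 0), (1, 0, 1), 0))   # (0.75, 0.25, 0.75)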
https://bitbucket.org/yt_analysis/yt-3.0/commits/0849d317e494/
Changeset: 0849d317e494
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-21 21:34:50
Summary: Merging from main yt-3.0 branch into 'smoothing' bookmark.
Affected #: 82 files
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -4,8 +4,16 @@
juxtaposicion at gmail.com = cemoody at ucsc.edu
chummels at gmail.com = chummels at astro.columbia.edu
jwise at astro.princeton.edu = jwise at physics.gatech.edu
-atmyers = atmyers at berkeley.edu
sam.skillman at gmail.com = samskillman at gmail.com
casey at thestarkeffect.com = caseywstark at gmail.com
chiffre = chiffre at posteo.de
Christian Karch = chiffre at posteo.de
+atmyers at berkeley.edu = atmyers2 at gmail.com
+atmyers = atmyers2 at gmail.com
+drudd = drudd at uchicago.edu
+awetzel = andrew.wetzel at yale.edu
+David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
+tabel = tabel at slac.stanford.edu
+sername=kayleanelson = kaylea.nelson at yale.edu
+kayleanelson = kaylea.nelson at yale.edu
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,13 +12,16 @@
yt/frontends/sph/smoothing_kernel.c
yt/geometry/fake_octree.c
yt/geometry/oct_container.c
+yt/geometry/oct_visitors.c
yt/geometry/particle_deposit.c
+yt/geometry/particle_oct_container.c
yt/geometry/selection_routines.c
yt/utilities/amr_utils.c
yt/utilities/kdtree/forthonf2c.h
yt/utilities/libconfig_wrapper.c
yt/utilities/spatial/ckdtree.c
yt/utilities/lib/alt_ray_tracers.c
+yt/utilities/lib/amr_kdtools.c
yt/utilities/lib/CICDeposit.c
yt/utilities/lib/ContourFinding.c
yt/utilities/lib/DepthFirstOctree.c
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5158,3 +5158,4 @@
0000000000000000000000000000000000000000 hop callback
a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
+f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
recursive-include yt/gui/reason/html *.html *.png *.ico *.js
recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -473,11 +473,18 @@
function do_setup_py
{
[ -e $1/done ] && return
- echo "Installing $1 (arguments: '$*')"
- [ ! -e $1/extracted ] && tar xfz $1.tar.gz
- touch $1/extracted
- cd $1
- if [ ! -z `echo $1 | grep h5py` ]
+ LIB=$1
+ shift
+ if [ -z "$@" ]
+ then
+ echo "Installing $LIB"
+ else
+ echo "Installing $LIB (arguments: '$@')"
+ fi
+ [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+ touch $LIB/extracted
+ cd $LIB
+ if [ ! -z `echo $LIB | grep h5py` ]
then
shift
( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -519,8 +526,8 @@
function get_ytproject
{
+ [ -e $1 ] && return
echo "Downloading $1 from yt-project.org"
- [ -e $1 ] && return
${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
}
@@ -551,67 +558,93 @@
mkdir -p ${DEST_DIR}/src
cd ${DEST_DIR}/src
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
# Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1 Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0 Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1 PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299 Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12 bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6 reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3 freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3 h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554 libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208 matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2 mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8 numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
-echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865 zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83 pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1 tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
-echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397 python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
-echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202 h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1 hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56 ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586 libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97 mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4 nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684 numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68 python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
-echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
-echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
-echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4 scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4 sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8 sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
# Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
[ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
-get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
-get_ytproject sympy-0.7.2.tar.gz
-get_ytproject rockstar-0.99.6.tar.gz
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
if [ $INST_BZLIB -eq 1 ]
then
- if [ ! -e bzip2-1.0.6/done ]
+ if [ ! -e $BZLIB/done ]
then
- [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+ [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
echo "Installing BZLIB"
- cd bzip2-1.0.6
+ cd $BZLIB
if [ `uname` = "Darwin" ]
then
if [ -z "${CC}" ]
@@ -634,11 +667,11 @@
if [ $INST_ZLIB -eq 1 ]
then
- if [ ! -e zlib-1.2.7/done ]
+ if [ ! -e $ZLIB/done ]
then
- [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+ [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
echo "Installing ZLIB"
- cd zlib-1.2.7
+ cd $ZLIB
( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -652,11 +685,11 @@
if [ $INST_PNG -eq 1 ]
then
- if [ ! -e libpng-1.6.1/done ]
+ if [ ! -e $PNG/done ]
then
- [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+ [ ! -e $PNG ] && tar xfz $PNG.tar.gz
echo "Installing PNG"
- cd libpng-1.6.1
+ cd $PNG
( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -670,13 +703,14 @@
if [ $INST_FTYPE -eq 1 ]
then
- if [ ! -e freetype-2.4.11/done ]
+ if [ ! -e $FREETYPE_VER/done ]
then
- [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+ [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
echo "Installing FreeType2"
- cd freetype-2.4.11
+ cd $FREETYPE_VER
( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
- ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
@@ -688,11 +722,11 @@
if [ -z "$HDF5_DIR" ]
then
- if [ ! -e hdf5-1.8.9/done ]
+ if [ ! -e $HDF5/done ]
then
- [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+ [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
echo "Installing HDF5"
- cd hdf5-1.8.9
+ cd $HDF5
( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -707,11 +741,11 @@
if [ $INST_SQLITE3 -eq 1 ]
then
- if [ ! -e sqlite-autoconf-3071601/done ]
+ if [ ! -e $SQLITE/done ]
then
- [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+ [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
echo "Installing SQLite3"
- cd sqlite-autoconf-3071601
+ cd $SQLITE
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -720,11 +754,11 @@
fi
fi
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e $PYTHON/done ]
then
echo "Installing Python. This may take a while, but don't worry. yt loves you."
- [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
- cd Python-2.7.4
+ [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+ cd $PYTHON
( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -739,7 +773,7 @@
if [ $INST_HG -eq 1 ]
then
- do_setup_py mercurial-2.5.4
+ do_setup_py $MERCURIAL
export HG_EXEC=${DEST_DIR}/bin/hg
else
# We assume that hg can be found in the path.
@@ -788,9 +822,9 @@
if [ $INST_SCIPY -eq 0 ]
then
- do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+ do_setup_py $NUMPY ${NUMPY_ARGS}
else
- if [ ! -e scipy-0.11.0/done ]
+ if [ ! -e $SCIPY/done ]
then
if [ ! -e BLAS/done ]
then
@@ -798,17 +832,17 @@
echo "Building BLAS"
cd BLAS
gfortran -O2 -fPIC -fno-second-underscore -c *.f
- ar r libfblas.a *.o 1>> ${LOG_FILE}
+ ar r libfblas.a *.o &>> ${LOG_FILE}
ranlib libfblas.a 1>> ${LOG_FILE}
rm -rf *.o
touch done
cd ..
fi
- if [ ! -e lapack-3.4.2/done ]
+ if [ ! -e $LAPACK/done ]
then
- tar xfz lapack-3.4.2.tar.gz
+ tar xfz $LAPACK.tar.gz
echo "Building LAPACK"
- cd lapack-3.4.2/
+ cd $LAPACK/
cp INSTALL/make.inc.gfortran make.inc
make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
touch done
@@ -816,9 +850,9 @@
fi
fi
export BLAS=$PWD/BLAS/libfblas.a
- export LAPACK=$PWD/lapack-3.4.2/liblapack.a
- do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
- do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+ export LAPACK=$PWD/$LAPACK/liblapack.a
+ do_setup_py $NUMPY ${NUMPY_ARGS}
+ do_setup_py $SCIPY ${NUMPY_ARGS}
fi
if [ -n "${MPL_SUPP_LDFLAGS}" ]
@@ -840,10 +874,10 @@
echo "Setting CFLAGS ${CFLAGS}"
fi
# Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+do_setup_py $MATPLOTLIB
if [ -n "${OLD_LDFLAGS}" ]
then
export LDFLAG=${OLD_LDFLAGS}
@@ -855,36 +889,36 @@
# Now we do our IPython installation, which has two optional dependencies.
if [ $INST_0MQ -eq 1 ]
then
- if [ ! -e zeromq-3.2.2/done ]
+ if [ ! -e $ZEROMQ/done ]
then
- [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+ [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
echo "Installing ZeroMQ"
- cd zeromq-3.2.2
+ cd $ZEROMQ
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
fi
- do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
- do_setup_py tornado-3.0
+ do_setup_py $PYZMQ --zmq=${DEST_DIR}
+ do_setup_py $TORNADO
fi
-do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
-do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
-do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
# Now we build Rockstar and set its environment variable.
if [ $INST_ROCKSTAR -eq 1 ]
then
if [ ! -e Rockstar/done ]
then
- [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+ [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
echo "Building Rockstar"
cd Rockstar
( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,6 +1,6 @@
#!python
import os, re
-from distutils import version
+from distutils.version import LooseVersion
from yt.mods import *
from yt.data_objects.data_containers import YTDataContainer
namespace = locals().copy()
@@ -23,10 +23,12 @@
code.interact(doc, None, namespace)
sys.exit()
-if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
api_version = '0.10'
+elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+ api_version = '0.11'
else:
- api_version = '0.11'
+ api_version = '1.0'
if api_version == "0.10" and "DISPLAY" in os.environ:
from matplotlib import rcParams
@@ -42,13 +44,18 @@
ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
elif api_version == "0.10":
ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
-elif api_version == "0.11":
- from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+else:
+ if api_version == "0.11":
+ from IPython.frontend.terminal.interactiveshell import \
+ TerminalInteractiveShell
+ elif api_version == "1.0":
+ from IPython.terminal.interactiveshell import TerminalInteractiveShell
+ else:
+ raise RuntimeError
ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
display_banner = True)
if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
-else:
- raise RuntimeError
+
# The rest is a modified version of the IPython default profile code
@@ -77,7 +84,7 @@
ip = ip_shell.IP.getapi()
try_next = IPython.ipapi.TryNext
kwargs = dict(sys_exit=1, banner=doc)
-elif api_version == "0.11":
+elif api_version in ("0.11", "1.0"):
ip = ip_shell
try_next = IPython.core.error.TryNext
kwargs = dict()
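The scripts/iyt change above selects an IPython API level by comparing version strings with LooseVersion instead of comparing them lexically. A condensed sketch of just that selection (it requires IPython to be importable):

    from distutils.version import LooseVersion
    import IPython

    if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
        api_version = '0.10'
    elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
        api_version = '0.11'
    else:
        api_version = '1.0'

    # LooseVersion compares release components numerically, so '0.10' > '0.2',
    # which a plain string comparison would get wrong.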
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -96,7 +96,7 @@
if answer_big_data:
nose_argv.append('--answer-big-data')
log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
- ytcfg["yt","suppressStreamLogging"] = 'True'
+ ytcfg.set("yt","suppressStreamLogging", 'True')
initial_dir = os.getcwd()
yt_file = os.path.abspath(__file__)
yt_dir = os.path.dirname(yt_file)
@@ -105,4 +105,4 @@
nose.run(argv=nose_argv)
finally:
os.chdir(initial_dir)
- ytcfg["yt","suppressStreamLogging"] = log_suppress
+ ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- /dev/null
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -0,0 +1,809 @@
+from scipy import optimize
+import numpy as na
+import h5py
+from yt.analysis_modules.absorption_spectrum.absorption_line \
+ import voigt
+
+
+def generate_total_fit(x, fluxData, orderFits, speciesDicts,
+ minError=1E-5, complexLim=.999,
+ fitLim=.99, minLength=3,
+ maxLength=1000, splitLim=.99,
+ output_file=None):
+
+ """
+ This function is designed to fit an absorption spectrum by breaking
+ the spectrum up into absorption complexes, and iteratively adding
+ and optimizing voigt profiles to each complex.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ 1d array of wavelengths
+ fluxData : (N) ndarray
+ array of flux corresponding to the wavelengths given
+ in x. (needs to be the same size as x)
+ orderFits : list
+ list of the names of the species in the order that they
+ should be fit. Names should correspond to the names of the species
+ given in speciesDicts. (ex: ['lya','OVI'])
+ speciesDicts : dictionary
+ Dictionary of dictionaries (I'm addicted to dictionaries, I
+ confess). Top level keys should be the names of all the species given
+ in orderFits. The entries should be dictionaries containing all
+ relevant parameters needed to create an absorption line of a given
+ species (f,Gamma,lambda0) as well as max and min values for parameters
+ to be fit
+ complexLim : float, optional
+ Maximum flux to start the edge of an absorption complex. Different
+ from fitLim because it decides extent of a complex rather than
+ whether or not a complex is accepted.
+ fitLim : float,optional
+ Maximum flux where the level of absorption will trigger
+        identification of the region as an absorption complex. Default = .99.
+        (ex: for fitLim = .98, a region where all the flux is between 1.0 and
+ .99 will not be separated out to be fit as an absorbing complex, but
+ a region that contains a point where the flux is .97 will be fit
+ as an absorbing complex.)
+ minLength : int, optional
+ number of cells required for a complex to be included.
+ default is 3 cells.
+ maxLength : int, optional
+ number of cells required for a complex to be split up. Default
+ is 1000 cells.
+ splitLim : float, optional
+ if attempting to split a region for being larger than maxlength
+ the point of the split must have a flux greater than splitLim
+ (ie: absorption greater than splitLim). Default= .99.
+ output_file : string, optional
+ location to save the results of the fit.
+
+ Returns
+ -------
+ allSpeciesLines : dictionary
+ Dictionary of dictionaries representing the fit lines.
+ Top level keys are the species given in orderFits and the corresponding
+ entries are dictionaries with the keys 'N','b','z', and 'group#'.
+ Each of these corresponds to a list of the parameters for every
+ accepted fitted line. (ie: N[0],b[0],z[0] will create a line that
+ fits some part of the absorption spectrum). 'group#' is a similar list
+ but identifies which absorbing complex each line belongs to. Lines
+ with the same group# were fit at the same time. group#'s do not
+ correlate between species (ie: an lya line with group number 1 and
+ an OVI line with group number 1 were not fit together and do
+ not necessarily correspond to the same region)
+ yFit : (N) ndarray
+ array of flux corresponding to the combination of all fitted
+ absorption profiles. Same size as x.
+ """
+
+ #Empty dictionary for fitted lines
+ allSpeciesLines = {}
+
+ #Wavelength of beginning of array, wavelength resolution
+ x0,xRes=x[0],x[1]-x[0]
+
+ #Empty fit without any lines
+ yFit = na.ones(len(fluxData))
+
+ #Find all regions where lines/groups of lines are present
+ cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
+ complexLim=complexLim, minLength=minLength,
+ maxLength=maxLength, splitLim=splitLim)
+
+ #Fit all species one at a time in given order from low to high wavelength
+ for species in orderFits:
+ speciesDict = speciesDicts[species]
+ speciesLines = {'N':na.array([]),
+ 'b':na.array([]),
+ 'z':na.array([]),
+ 'group#':na.array([])}
+
+ #Set up wavelengths for species
+ initWl = speciesDict['wavelength'][0]
+
+ for b_i,b in enumerate(cBounds):
+ xBounded=x[b[1]:b[2]]
+ yDatBounded=fluxData[b[1]:b[2]]
+ yFitBounded=yFit[b[1]:b[2]]
+
+ #Find init redshift
+ z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
+
+ #Check if any flux at partner sites
+ if not _line_exists(speciesDict['wavelength'],
+ fluxData,z,x0,xRes,fitLim):
+ continue
+
+ #Fit Using complex tools
+ newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
+ z,fitLim,minError*(b[2]-b[1]),speciesDict)
+
+ #Check existence of partner lines if applicable
+ newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
+ b, minError*(b[2]-b[1]),
+ x0, xRes, speciesDict)
+
+ #If flagged as a bad fit, species is lyman alpha,
+ # and it may be a saturated line, use special tools
+ if flag and species=='lya' and min(yDatBounded)<.1:
+ newLinesP=_large_flag_fit(xBounded,yDatBounded,
+ yFitBounded,z,speciesDict,
+                    fitLim,minError*(b[2]-b[1]))
+
+ #Adjust total current fit
+ yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
+
+ #Add new group to all fitted lines
+ if na.size(newLinesP)>0:
+ speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
+ speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
+ speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
+ groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
+ speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+
+ allSpeciesLines[species]=speciesLines
+
+ if output_file:
+ _output_fit(allSpeciesLines, output_file)
+
+ return (allSpeciesLines,yFit)
+
+def _complex_fit(x, yDat, yFit, initz, minSize, errBound, speciesDict,
+ initP=None):
+ """ Fit an absorption complex by iteratively adding and optimizing
+ voigt profiles.
+
+    A complex is defined as a region where some number of lines may be present,
+    or a region of non-zero absorption. Lines are iteratively added
+    and optimized until the least-squares difference between the flux
+    generated from the optimized parameters and the desired flux profile
+    is less than the error bound.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelength
+ ydat : (N) ndarray
+ array of desired flux profile to be fitted for the wavelength
+ space given by x. Same size as x.
+ yFit : (N) ndarray
+ array of flux profile fitted for the wavelength
+ space given by x already. Same size as x.
+ initz : float
+ redshift to try putting first line at
+ (maximum absorption for region)
+ minsize : float
+ minimum absorption allowed for a line to still count as a line
+ given in normalized flux (ie: for minSize=.9, only lines with minimum
+ flux less than .9 will be fitted)
+ errbound : float
+ maximum total error allowed for an acceptable fit
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+ initP : (,3,) ndarray
+ initial guess to try for line parameters to fit the region. Used
+ by large_flag_fit. Default = None, and initial guess generated
+ automatically.
+
+ Returns
+ -------
+ linesP : (3,) ndarray
+ Array of best parameters if a good enough fit is found in
+ the form [[N1,b1,z1], [N2,b2,z2],...]
+ flag : bool
+ boolean value indicating the success of the fit (True if unsuccessful)
+ """
+
+ #Setup initial line guesses
+ if initP==None: #Regular fit
+ initP = [0,0,0]
+ if min(yDat)<.5: #Large lines get larger initial guess
+ initP[0] = 10**16
+ elif min(yDat)>.9: #Small lines get smaller initial guess
+ initP[0] = 10**12.5
+ else:
+ initP[0] = speciesDict['init_N']
+ initP[1] = speciesDict['init_b']
+ initP[2]=initz
+ initP=na.array([initP])
+
+ linesP = initP
+
+ #For generating new z guesses
+ wl0 = speciesDict['wavelength'][0]
+
+ #Check if first line exists still
+ if min(yDat-yFit+1)>minSize:
+ return [],False
+
+ #Values to proceed through first run
+ errSq,prevErrSq=1,1000
+
+ while True:
+ #Initial parameter guess from joining parameters from all lines
+ # in lines into a single array
+ initP = linesP.flatten()
+
+ #Optimize line
+ fitP,success=optimize.leastsq(_voigt_error,initP,
+ args=(x,yDat,yFit,speciesDict),
+ epsfcn=1E-10,maxfev=1000)
+
+ #Set results of optimization
+ linesP = na.reshape(fitP,(-1,3))
+
+ #Generate difference between current best fit and data
+ yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+ dif = yFit*yNewFit-yDat
+
+ #Sum to get idea of goodness of fit
+ errSq=sum(dif**2)
+
+ #If good enough, break
+ if errSq < errBound:
+ break
+
+ #If last fit was worse, reject the last line and revert to last fit
+ if errSq > prevErrSq*10:
+ #If its still pretty damn bad, cut losses and try flag fit tools
+ if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
+ return [],True
+ else:
+ yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+ break
+
+ #If too many lines
+ if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+ #If its fitable by flag tools and still bad, use flag tools
+ if errSq >1E2*errBound and speciesDict['name']=='HI lya':
+ return [],True
+ else:
+ break
+
+ #Store previous data in case reject next fit
+ prevErrSq = errSq
+ prevLinesP = linesP
+
+
+ #Set up initial condition for new line
+ newP = [0,0,0]
+ if min(dif)<.1:
+ newP[0]=10**12
+ elif min(dif)>.9:
+ newP[0]=10**16
+ else:
+ newP[0]=10**14
+ newP[1] = speciesDict['init_b']
+ newP[2]=(x[dif.argmax()]-wl0)/wl0
+ linesP=na.append(linesP,[newP],axis=0)
+
+
+ #Check the parameters of all lines to see if they fall in an
+ # acceptable range, as given in dict ref
+ remove=[]
+ for i,p in enumerate(linesP):
+ check=_check_params(na.array([p]),speciesDict)
+ if check:
+ remove.append(i)
+ linesP = na.delete(linesP,remove,axis=0)
+
+ return linesP,False
+
+def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
+ """
+ Attempts to more robustly fit saturated lyman alpha regions that have
+ not converged to satisfactory fits using the standard tools.
+
+ Uses a preselected sample of a wide range of initial parameter guesses
+ designed to fit saturated lines (see get_test_lines).
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelength
+ ydat : (N) ndarray
+ array of desired flux profile to be fitted for the wavelength
+ space given by x. Same size as x.
+ yFit : (N) ndarray
+ array of flux profile fitted for the wavelength
+ space given by x already. Same size as x.
+ initz : float
+ redshift to try putting first line at
+ (maximum absorption for region)
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+ minsize : float
+ minimum absorption allowed for a line to still count as a line
+ given in normalized flux (ie: for minSize=.9, only lines with minimum
+ flux less than .9 will be fitted)
+ errbound : float
+ maximum total error allowed for an acceptable fit
+
+ Returns
+ -------
+ bestP : (3,) ndarray
+ array of best parameters if a good enough fit is found in
+ the form [[N1,b1,z1], [N2,b2,z2],...]
+ """
+
+ #Set up some initial line guesses
+ lineTests = _get_test_lines(initz)
+
+ #Keep track of the lowest achieved error
+ bestError = 1000
+
+ #Iterate through test line guesses
+ for initLines in lineTests:
+ if initLines[1,0]==0:
+ initLines = na.delete(initLines,1,axis=0)
+
+ #Do fitting with initLines as first guess
+ linesP,flag=_complex_fit(x,yDat,yFit,initz,
+ minSize,errBound,speciesDict,initP=initLines)
+
+ #Find error of last fit
+ yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+ dif = yFit*yNewFit-yDat
+ errSq=sum(dif**2)
+
+ #If error lower, keep track of the lines used to make that fit
+ if errSq < bestError:
+ bestError = errSq
+ bestP = linesP
+
+ if bestError>10*errBound*len(x):
+ return []
+ else:
+ return bestP
+
+def _get_test_lines(initz):
+ """
+ Returns a 3d numpy array of lines to test as initial guesses for difficult
+ to fit lyman alpha absorbers that are saturated.
+
+    The array is 3d because
+    the first dimension gives separate initial guesses, the second dimension
+    has multiple lines for the same guess (trying a broad line plus a
+    saturated line), and the third dimension contains the 3 fit parameters (N,b,z).
+
+ Parameters
+ ----------
+ initz : float
+ redshift to give all the test lines
+
+ Returns
+ -------
+ testP : (,3,) ndarray
+ numpy array of the form
+ [[[N1a,b1a,z1a], [N1b,b1b,z1b]], [[N2a,b2,z2a],...] ...]
+ """
+
+ #Set up a bunch of empty lines
+ testP = na.zeros((10,2,3))
+
+ testP[0,0,:]=[1E18,20,initz]
+ testP[1,0,:]=[1E18,40,initz]
+ testP[2,0,:]=[1E16,5, initz]
+ testP[3,0,:]=[1E16,20,initz]
+ testP[4,0,:]=[1E16,80,initz]
+
+ testP[5,0,:]=[1E18,20,initz]
+ testP[6,0,:]=[1E18,40,initz]
+ testP[7,0,:]=[1E16,5, initz]
+ testP[8,0,:]=[1E16,20,initz]
+ testP[9,0,:]=[1E16,80,initz]
+
+ testP[5,1,:]=[1E13,100,initz]
+ testP[6,1,:]=[1E13,100,initz]
+ testP[7,1,:]=[1E13,100,initz]
+ testP[8,1,:]=[1E13,100,initz]
+ testP[9,1,:]=[1E13,100,initz]
+
+ return testP
+
+def _get_bounds(z, b, wl, x0, xRes):
+ """
+    Gets the indices of the wavelength range containing the wavelength wl,
+    using the size of some initial wavelength range.
+
+ Used for checking if species with multiple lines (as in the OVI doublet)
+ fit all lines appropriately.
+
+ Parameters
+ ----------
+ z : float
+ redshift
+ b : (3) ndarray/list
+ initial bounds in form [i0,i1,i2] where i0 is the index of the
+ minimum flux for the complex, i1 is index of the lower wavelength
+ edge of the complex, and i2 is the index of the higher wavelength
+ edge of the complex.
+ wl : float
+ unredshifted wavelength of the peak of the new region
+ x0 : float
+ wavelength of the index 0
+ xRes : float
+ difference in wavelength for two consecutive indices
+
+ Returns
+ -------
+ indices : (2) tuple
+ Tuple (i1,i2) where i1 is the index of the lower wavelength bound of
+ the new region and i2 is the index of the higher wavelength bound of
+ the new region
+ """
+
+ r=[-b[1]+100+b[0],b[2]+100-b[0]]
+ redWl = (z+1)*wl
+ iRedWl=int((redWl-x0)/xRes)
+ indices = (iRedWl-r[0],iRedWl+r[1])
+
+ return indices
+
+def _remove_unaccepted_partners(linesP, x, y, b, errBound,
+ x0, xRes, speciesDict):
+ """
+ Given a set of parameters [N,b,z] that form multiple lines for a given
+ species (as in the OVI doublet), remove any set of parameters where
+ not all transition wavelengths have a line that matches the fit.
+
+ (ex: if a fit is determined based on the first line of the OVI doublet,
+ but the given parameters give a bad fit of the wavelength space of
+ the second line then that set of parameters is removed from the array
+ of line parameters.)
+
+ Parameters
+ ----------
+ linesP : (3,) ndarray
+ array giving sets of line parameters in
+ form [[N1, b1, z1], ...]
+ x : (N) ndarray
+ wavelength array [nm]
+ y : (N) ndarray
+ normalized flux array of original data
+ b : (3) tuple/list/ndarray
+ indices that give the bounds of the original region so that another
+ region of similar size can be used to determine the goodness
+ of fit of the other wavelengths
+ errBound : float
+ size of the error that is appropriate for a given region,
+ adjusted to account for the size of the region.
+
+ Returns
+ -------
+ linesP : (3,) ndarray
+ array similar to linesP that only contains lines with
+ appropriate fits of all transition wavelengths.
+ """
+
+ #List of lines to remove
+ removeLines=[]
+
+ #Iterate through all sets of line parameters
+ for i,p in enumerate(linesP):
+
+ #iterate over all transition wavelengths
+ for wl in speciesDict['wavelength']:
+
+ #Get the bounds of a similar sized region around the
+ # appropriate wavelength, and then get the appropriate
+ # region of wavelength and flux
+ lb = _get_bounds(p[2],b,wl,x0,xRes)
+ xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
+
+ #Generate a fit and find the difference to data
+ yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+ dif =yb-yFitb
+
+ #Only counts as an error if line is too big ---------------<
+ dif = [k for k in dif if k>0]
+ err = sum(dif)
+
+ #If the fit is too bad then add the line to list of removed lines
+ if err > errBound*1E2:
+ removeLines.append(i)
+ break
+
+ #Remove all bad line fits
+ linesP = na.delete(linesP,removeLines,axis=0)
+
+ return linesP
+
+
+
+def _line_exists(wavelengths, y, z, x0, xRes,fluxMin):
+    """For a group of lines, determines whether there is a change in flux greater
+    than some minimum at the same redshift for each of the initial wavelengths
+
+ Parameters
+ ----------
+ wavelengths : (N) ndarray
+ array of initial wavelengths to check
+ y : (N) ndarray
+ flux array to check
+ x0 : float
+ wavelength of the first value in y
+ xRes : float
+ difference in wavelength between consecutive cells in flux array
+ fluxMin : float
+ maximum flux to count as a line existing.
+
+ Returns
+ -------
+
+ flag : boolean
+ value indicating whether all lines exist. True if all lines exist
+ """
+
+ #Iterate through initial wavelengths
+ for wl in wavelengths:
+ #Redshifted wavelength
+ redWl = (z+1)*wl
+
+ #Index of the redshifted wavelength
+ indexRedWl = (redWl-x0)/xRes
+
+ #Check if surpasses minimum absorption bound
+ if y[int(indexRedWl)]>fluxMin:
+ return False
+
+ return True
+
+def _find_complexes(x, yDat, complexLim=.999, fitLim=.99,
+ minLength =3, maxLength=1000, splitLim=.99):
+ """Breaks up the wavelength space into groups
+ where there is some absorption.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelengths
+ yDat : (N) ndarray
+ array of flux corresponding to the wavelengths given
+ in x. (needs to be the same size as x)
+ complexLim : float, optional
+ Maximum flux to start the edge of an absorption complex. Different
+ from fitLim because it decides extent of a complex rather than
+ whether or not a complex is accepted.
+ fitLim : float,optional
+ Maximum flux where the level of absorption will trigger
+        identification of the region as an absorption complex. Default = .99.
+        (ex: for fitLim = .98, a region where all the flux is between 1.0 and
+ .99 will not be separated out to be fit as an absorbing complex, but
+ a region that contains a point where the flux is .97 will be fit
+ as an absorbing complex.)
+ minLength : int, optional
+ number of cells required for a complex to be included.
+ default is 3 cells.
+ maxLength : int, optional
+ number of cells required for a complex to be split up. Default
+ is 1000 cells.
+ splitLim : float, optional
+ if attempting to split a region for being larger than maxlength
+ the point of the split must have a flux greater than splitLim
+ (ie: absorption greater than splitLim). Default= .99.
+
+ Returns
+ -------
+ cBounds : (3,)
+ list of bounds in the form [[i0,i1,i2],...] where i0 is the
+ index of the maximum flux for a complex, i1 is the index of the
+ beginning of the complex, and i2 is the index of the end of the
+ complex. Indexes refer to the indices of x and yDat.
+ """
+
+ #Initialize empty list of bounds
+ cBounds=[]
+
+ #Iterate through cells of flux
+ i=0
+ while (i<len(x)):
+
+ #Start tracking at a region that surpasses flux of edge
+ if yDat[i]<complexLim:
+
+ #Iterate through until reach next edge
+ j=0
+ while yDat[i+j]<complexLim: j=j+1
+
+ #Check if the complex is big enough
+ if j >minLength:
+
+ #Check if there is enough absorption for the complex to
+ # be included
+ cPeak = yDat[i:i+j].argmin()
+ if yDat[cPeak+i]<fitLim:
+ cBounds.append([cPeak+i,i,i+j])
+
+ i=i+j
+ i=i+1
+
+ i=0
+ #Iterate through the bounds
+ while i < len(cBounds):
+ b=cBounds[i]
+
+ #Check if the region needs to be divided
+ if b[2]-b[1]>maxLength:
+
+ #Find the minimum absorption in the middle two quartiles of
+ # the large complex
+ q=(b[2]-b[1])/4
+ cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+ #Only break it up if the minimum absorption is actually low enough
+ if yDat[cut]>splitLim:
+
+ #Get the new two peaks
+ b1Peak = yDat[b[1]:cut].argmin()+b[1]
+ b2Peak = yDat[cut:b[2]].argmin()+cut
+
+ #add the two regions separately
+ cBounds.insert(i+1,[b1Peak,b[1],cut])
+ cBounds.insert(i+2,[b2Peak,cut,b[2]])
+
+ #Remove the original region
+ cBounds.pop(i)
+ i=i+1
+ i=i+1
+
+ return cBounds
+
+def _gen_flux_lines(x, linesP, speciesDict):
+ """
+ Calculates the normalized flux for a region of wavelength space
+ generated by a set of absorption lines.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ Array of wavelength
+ linesP: (3,) ndarray
+ Array giving sets of line parameters in
+ form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ Dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+
+ Returns
+ -------
+ flux : (N) ndarray
+ Array of normalized flux generated by the line parameters
+ given in linesP over the wavelength space given in x. Same size as x.
+ """
+ y=0
+ for p in linesP:
+ for i in range(speciesDict['numLines']):
+ f=speciesDict['f'][i]
+ g=speciesDict['Gamma'][i]
+ wl=speciesDict['wavelength'][i]
+ y = y+ _gen_tau(x,p,f,g,wl)
+ flux = na.exp(-y)
+ return flux
+
+def _gen_tau(t, p, f, Gamma, lambda_unshifted):
+    """This calculates an optical depth distribution for given parameters using
+    the yt voigt profile generator"""
+ N,b,z= p
+
+ #Calculating quantities
+ tau_o = 1.4973614E-15*N*f*lambda_unshifted/b
+ a=7.95774715459E-15*Gamma*lambda_unshifted/b
+ x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
+
+ H = na.zeros(len(x))
+ H = voigt(a,x)
+
+ tau = tau_o*H
+
+ return tau
+
+def _voigt_error(pTotal, x, yDat, yFit, speciesDict):
+ """
+ Gives the error of each point used to optimize the fit of a group
+ of absorption lines to a given flux profile.
+
+ If the parameters are not in the acceptable range as defined
+ in speciesDict, the first value of the error array will
+ contain a large value (999), to prevent the optimizer from running
+ into negative number problems.
+
+ Parameters
+ ----------
+ pTotal : (3,) ndarray
+ Array with form [[N1, b1, z1], ...]
+ x : (N) ndarray
+ array of wavelengths [nm]
+ yDat : (N) ndarray
+ desired normalized flux from fits of lines in wavelength
+ space given by x
+ yFit : (N) ndarray
+ previous fit over the wavelength space given by x.
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+
+ Returns
+ -------
+ error : (N) ndarray
+ the difference between the fit generated by the parameters
+ given in pTotal multiplied by the previous fit and the desired
+ flux profile, w/ first index modified appropriately for bad
+ parameter choices
+ """
+
+ pTotal.shape = (-1,3)
+ yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
+
+ error = yDat-yFit*yNewFit
+ error[0] = _check_params(pTotal,speciesDict)
+
+ return error
+
+def _check_params(p, speciesDict):
+ """
+ Check to see if any of the parameters in p fall outside the range
+ given in speciesDict.
+
+ Parameters
+ ----------
+ p : (3,) ndarray
+ array with form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ dictionary with properties giving the max and min
+ values appropriate for each parameter N,b, and z.
+
+ Returns
+ -------
+ check : int
+ 0 if all values are fine
+ 999 if any values fall outside acceptable range
+ """
+ check = 0
+ if any(p[:,0] > speciesDict['maxN']) or\
+ any(p[:,0] < speciesDict['minN']) or\
+ any(p[:,1] > speciesDict['maxb']) or\
+ any(p[:,1] < speciesDict['minb']) or\
+ any(p[:,2] > speciesDict['maxz']) or\
+ any(p[:,2] < speciesDict['minz']):
+ check = 999
+ return check
+
+
+def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
+ """
+ This function is designed to output the parameters of the series
+ of lines used to fit an absorption spectrum.
+
+ The dataset contains entries in the form species/N, species/b,
+ species/z, and species/complex. The ith entry in each of the datasets
+ is the fitted parameter for the ith line fitted to the spectrum for
+ the given species. The species names come from the fitted line
+ dictionary.
+
+ Parameters
+ ----------
+ lineDic : dictionary
+ Dictionary of dictionaries representing the fit lines.
+ Top level keys are the species given in orderFits and the corresponding
+ entries are dictionaries with the keys 'N','b','z', and 'group#'.
+ Each of these corresponds to a list of the parameters for every
+ accepted fitted line.
+ file_name : string, optional
+ Name of the file to output fit to. Default = 'spectrum_fit.h5'
+
+ """
+ f = h5py.File(file_name, 'w')
+ for ion, params in lineDic.iteritems():
+ f.create_dataset("{0}/N".format(ion),data=params['N'])
+ f.create_dataset("{0}/b".format(ion),data=params['b'])
+ f.create_dataset("{0}/z".format(ion),data=params['z'])
+ f.create_dataset("{0}/complex".format(ion),data=params['group#'])
+ print 'Writing spectrum fit to {0}'.format(file_name)
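For reference, the two helpers above compose as tau = tau_0 * H(a, x) and flux = exp(-sum(tau)). A minimal stand-alone Python sketch of that composition; voigt_stub is only an illustrative stand-in for yt's voigt(), and the line parameters are made-up numbers:

    import numpy as np

    def voigt_stub(a, x):
        # illustrative stand-in for yt's voigt(); a Gaussian core is enough here
        return np.exp(-x ** 2)

    def tau_sketch(wave, N, b, z, f, Gamma, lambda0):
        # same quantities as _gen_tau above
        tau_0 = 1.4973614e-15 * N * f * lambda0 / b
        a = 7.95774715459e-15 * Gamma * lambda0 / b
        x = 299792.458 / b * (lambda0 * (1 + z) / wave - 1)
        return tau_0 * voigt_stub(a, x)

    wave = np.linspace(1214.0, 1218.0, 200)   # made-up wavelength grid
    tau = tau_sketch(wave, N=1e13, b=20.0, z=0.0,
                     f=0.4164, Gamma=6.265e8, lambda0=1215.67)
    flux = np.exp(-tau)                       # what _gen_flux_lines accumulates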
+
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/analysis_modules/absorption_spectrum/api.py
--- a/yt/analysis_modules/absorption_spectrum/api.py
+++ b/yt/analysis_modules/absorption_spectrum/api.py
@@ -30,3 +30,6 @@
from .absorption_spectrum import \
AbsorptionSpectrum
+
+from .absorption_spectrum_fit import \
+ generate_total_fit
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -30,6 +30,7 @@
class StandardRadialAnalysis(object):
def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):
+ raise NotImplementedError # see TODO
self.pf = pf
# We actually don't want to replicate the handling of setting the
# center here, so we will pass it to the sphere creator.
@@ -53,6 +54,7 @@
prof = BinnedProfile1D(self.obj, self.n_bins, "Radius",
self.inner_radius, self.outer_radius)
by_weights = defaultdict(list)
+ # TODO: analysis_field_list is undefined
for fspec in analysis_field_list:
if isinstance(fspec, types.TupleType) and len(fspec) == 2:
field, weight = fspec
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -28,7 +28,7 @@
import ConfigParser, os, os.path, types
ytcfgDefaults = dict(
- serialize = 'True',
+ serialize = 'False',
onlydeserialize = 'False',
timefunctions = 'False',
logfile = 'False',
@@ -62,7 +62,7 @@
notebook_password = '',
answer_testing_tolerance = '3',
answer_testing_bitwise = 'False',
- gold_standard_filename = 'gold008',
+ gold_standard_filename = 'gold010',
local_standard_filename = 'local001',
sketchfab_api_key = 'None',
thread_field_detection = 'False'
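These two defaults can still be overridden per user; a minimal sketch of the usual overrides (values are illustrative):

    # at runtime, before any data is loaded
    from yt.config import ytcfg
    ytcfg["yt", "serialize"] = "True"

    # or persistently in ~/.yt/config:
    # [yt]
    # serialize = True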
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -80,7 +80,7 @@
def eval(self, pf):
slc = self.SlicePlot(pf, self.axis, self.field, center = self.center)
- return pc.save()
+ return slc.save()
class QuantityProxy(AnalysisTask):
_params = None
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -89,3 +89,6 @@
from particle_trajectories import \
ParticleTrajectoryCollection
+
+from particle_filters import \
+ particle_filter
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -36,6 +36,7 @@
import fileinput
from re import finditer
+from yt.config import ytcfg
from yt.funcs import *
from yt.utilities.logger import ytLogger
from .data_containers import \
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -411,10 +411,12 @@
def blocks(self):
for io_chunk in self.chunks([], "io"):
for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0)):
- g = self._current_chunk.objs[0]
- mask = g._get_selector_mask(self.selector)
- if mask is None: continue
- yield g, mask
+ # For grids this will be a grid object, and for octrees it will
+ # be an OctreeSubset. Note that we delegate to the sub-object.
+ o = self._current_chunk.objs[0]
+ for b, m in o.select_blocks(self.selector):
+ if m is None: continue
+ yield b, m
class GenerationInProgress(Exception):
def __init__(self, fields):
@@ -433,7 +435,9 @@
@property
def selector(self):
if self._selector is not None: return self._selector
- sclass = getattr(yt.geometry.selection_routines,
+ s_module = getattr(self, '_selector_module',
+ yt.geometry.selection_routines)
+ sclass = getattr(s_module,
"%s_selector" % self._type_name, None)
if sclass is None:
raise YTDataSelectorNotImplemented(self._type_name)
@@ -456,7 +460,9 @@
for field in itertools.cycle(fields_to_get):
if inspected >= len(fields_to_get): break
inspected += 1
- if field not in self.pf.field_dependencies: continue
+ fd = self.pf.field_dependencies.get(field, None) or \
+ self.pf.field_dependencies.get(field[1], None)
+ if fd is None: continue
fd = self.pf.field_dependencies[field]
requested = self._determine_fields(list(set(fd.requested)))
deps = [d for d in requested if d not in fields_to_get]
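With blocks() now delegating to select_blocks(), one iteration pattern covers both patch grids and octree subsets; a minimal usage sketch (the data object and the "Density" field name are illustrative):

    dd = pf.h.all_data()
    for block, mask in dd.blocks:
        # block is a grid patch (or an octree block slice), mask is the
        # boolean selection for that block
        print block["Density"][mask].sum()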
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -276,6 +276,13 @@
else:
field = item
finfo = self.pf._get_field_info(*field)
+ # For those cases where we are guessing the field type, we will
+ # need to re-update -- otherwise, our item will never carry the
+ # field type. This can lead to, for instance, "unknown" particle
+ # types not getting correctly identified.
+ # Note that the *only* way this works is if we also fix our field
+ # dependencies during checking. Bug #627 talks about this.
+ item = self.pf._last_freq
else:
FI = getattr(self.pf, "field_info", FieldInfo)
if item in FI:
@@ -444,7 +451,7 @@
dd['units'] = self._units
dd['projected_units'] = self._projected_units,
dd['take_log'] = self.take_log
- dd['validators'] = self.validators.copy()
+ dd['validators'] = list(self.validators)
dd['particle_type'] = self.particle_type
dd['vector_field'] = self.vector_field
dd['display_field'] = True
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -492,6 +492,10 @@
if vals is None: return
return vals.reshape(self.ActiveDimensions, order="C")
+ def select_blocks(self, selector):
+ mask = self._get_selector_mask(selector)
+ yield self, mask
+
def _get_selector_mask(self, selector):
if hash(selector) == self._last_selector_id:
mask = self._last_mask
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -92,16 +92,32 @@
return tr
return tr
+ @property
+ def nz(self):
+ return self._num_zones + 2*self._num_ghost_zones
+
def _reshape_vals(self, arr):
if len(arr.shape) == 4: return arr
- nz = self._num_zones + 2*self._num_ghost_zones
+ nz = self.nz
n_oct = arr.shape[0] / (nz**3.0)
- arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+ if arr.size == nz*nz*nz*n_oct:
+ arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+ elif arr.size == nz*nz*nz*n_oct * 3:
+ arr = arr.reshape((nz, nz, nz, n_oct, 3), order="F")
+ else:
+ raise RuntimeError
arr = np.asfortranarray(arr)
return arr
_domain_ind = None
+ def select_blocks(self, selector):
+ mask = self.oct_handler.mask(selector)
+ mask = self._reshape_vals(mask)
+ slicer = OctreeSubsetBlockSlice(self)
+ for i, sl in slicer:
+ yield sl, mask[:,:,:,i]
+
@property
def domain_ind(self):
if self._domain_ind is None:
@@ -114,12 +130,17 @@
cls = getattr(particle_deposit, "deposit_%s" % method, None)
if cls is None:
raise YTParticleDepositionNotImplemented(method)
- nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
+ nz = self.nz
+ nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
op = cls(nvals) # We allocate number of zones, not number of octs
op.initialize()
- mylog.debug("Depositing %s particles into %s Octs",
- positions.shape[0], nvals[-1])
- op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+ mylog.debug("Depositing %s (%s^3) particles into %s Octs",
+ positions.shape[0], positions.shape[0]**0.3333333, nvals[-1])
+ pos = np.array(positions, dtype="float64")
+ # We should not need the following if we know in advance all our fields
+ # need no casting.
+ fields = [np.asarray(f, dtype="float64") for f in fields]
+ op.process_octree(self.oct_handler, self.domain_ind, pos, fields,
self.domain_id, self._domain_offset)
vals = op.finalize()
if vals is None: return
@@ -149,7 +170,7 @@
def select_icoords(self, dobj):
d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
- self._num_octs = d.shape[0] / 8
+ self._num_octs = d.shape[0] / (self.nz**3)
tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
domain_id = self.domain_id)
return tr
@@ -157,7 +178,7 @@
def select_fcoords(self, dobj):
d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
- self._num_octs = d.shape[0] / 8
+ self._num_octs = d.shape[0] / (self.nz**3)
tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
domain_id = self.domain_id)
return tr
@@ -165,7 +186,7 @@
def select_fwidth(self, dobj):
d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
- self._num_octs = d.shape[0] / 8
+ self._num_octs = d.shape[0] / (self.nz**3)
tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
domain_id = self.domain_id)
return tr
@@ -173,7 +194,7 @@
def select_ires(self, dobj):
d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
- self._num_octs = d.shape[0] / 8
+ self._num_octs = d.shape[0] / (self.nz**3)
tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
domain_id = self.domain_id)
return tr
@@ -204,7 +225,7 @@
# This is some subset of an octree. Note that the sum of subsets of an
# octree may multiply include data files. While we can attempt to mitigate
# this, it's unavoidable for many types of data storage on disk.
- _type_name = 'particle_octree_subset'
+ _type_name = 'indexed_octree_subset'
_con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
domain_id = -1
def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
@@ -225,3 +246,49 @@
self.base_region = base_region
self.base_selector = base_region.selector
+class OctreeSubsetBlockSlice(object):
+ def __init__(self, octree_subset):
+ self.ind = None
+ self.octree_subset = octree_subset
+ # Cache some attributes
+ nz = octree_subset.nz
+ self.ActiveDimensions = np.array([nz,nz,nz], dtype="int64")
+ for attr in ["ires", "icoords", "fcoords", "fwidth"]:
+ v = getattr(octree_subset, attr)
+ setattr(self, "_%s" % attr, octree_subset._reshape_vals(v))
+
+ def __iter__(self):
+ for i in range(self._ires.shape[-1]):
+ self.ind = i
+ yield i, self
+
+ def clear_data(self):
+ pass
+
+ def __getitem__(self, key):
+ return self.octree_subset[key][:,:,:,self.ind]
+
+ def get_vertex_centered_data(self, *args, **kwargs):
+ raise NotImplementedError
+
+ @property
+ def id(self):
+ return np.random.randint(1)
+
+ @property
+ def Level(self):
+ return self._ires[0,0,0,self.ind]
+
+ @property
+ def LeftEdge(self):
+ LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+ return LE
+
+ @property
+ def RightEdge(self):
+ RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+ return RE
+
+ @property
+ def dds(self):
+ return self._fwidth[0,0,0,self.ind,:]
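A small sketch of the array shapes _reshape_vals() and OctreeSubsetBlockSlice expect, assuming over_refine_factor = 1 and no ghost zones (so nz = 2); the sizes are made up:

    import numpy as np

    over_refine_factor, num_ghost_zones = 1, 0
    nz = (1 << over_refine_factor) + 2 * num_ghost_zones   # 2
    n_oct = 5
    scalar = np.arange(nz ** 3 * n_oct, dtype="float64")       # one value per cell
    scalar = scalar.reshape((nz, nz, nz, n_oct), order="F")
    vector = np.arange(nz ** 3 * n_oct * 3, dtype="float64")   # 3-vector per cell
    vector = vector.reshape((nz, nz, nz, n_oct, 3), order="F")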
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -41,6 +41,32 @@
mass_sun_cgs, \
mh
+def _field_concat(fname):
+ def _AllFields(field, data):
+ v = []
+ for ptype in data.pf.particle_types:
+ data.pf._last_freq = (ptype, None)
+ if ptype == "all" or \
+ ptype in data.pf.known_filters:
+ continue
+ v.append(data[ptype, fname].copy())
+ rv = np.concatenate(v, axis=0)
+ return rv
+ return _AllFields
+
+def _field_concat_slice(fname, axi):
+ def _AllFields(field, data):
+ v = []
+ for ptype in data.pf.particle_types:
+ data.pf._last_freq = (ptype, None)
+ if ptype == "all" or \
+ ptype in data.pf.known_filters:
+ continue
+ v.append(data[ptype, fname][:,axi])
+ rv = np.concatenate(v, axis=0)
+ return rv
+ return _AllFields
+
def particle_deposition_functions(ptype, coord_name, mass_name, registry):
orig = set(registry.keys())
def particle_count(field, data):
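The two closures above just concatenate the per-particle-type arrays into a single "all" array, skipping "all" itself and any known filters; a plain NumPy sketch of that pattern with made-up types and values:

    import numpy as np

    per_type = {"PartType0": np.ones(4) * 0.1,
                "PartType1": np.ones(2) * 2.0}
    all_mass = np.concatenate([per_type[pt] for pt in sorted(per_type)], axis=0)
    # -> array of length 6, one entry per particle regardless of type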
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -183,6 +183,8 @@
# Get our bins
if log_space:
+ if lower_bound <= 0.0 or upper_bound <= 0.0:
+ raise YTIllDefinedBounds(lower_bound, upper_bound)
func = np.logspace
lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
else:
@@ -522,7 +524,10 @@
return [self.x_bin_field, self.y_bin_field]
def fix_bounds(upper, lower, logit):
- if logit: return np.log10(upper), np.log10(lower)
+ if logit:
+ if lower <= 0.0 or upper <= 0.0:
+ raise YTIllDefinedBounds(lower, upper)
+ return np.log10(upper), np.log10(lower)
return upper, lower
class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -545,6 +550,8 @@
self.total_stuff = source_data.sum()
binned_field = self._get_empty_field()
weight_field = self._get_empty_field()
+ m_field = self._get_empty_field()
+ q_field = self._get_empty_field()
used_field = self._get_empty_field()
mi = args[0]
bin_indices_x = args[1][self.indices].ravel().astype('int64')
@@ -553,8 +560,8 @@
weight_data = weight_data[mi][self.indices]
nx = bin_indices_x.size
#mylog.debug("Binning %s / %s times", source_data.size, nx)
- Bin2DProfile(bin_indices_x, bin_indices_y, weight_data, source_data,
- weight_field, binned_field, used_field)
+ bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
+ weight_field, binned_field, m_field, q_field, used_field)
if accumulation: # Fix for laziness
if not iterable(accumulation):
raise SyntaxError("Accumulation needs to have length 2")
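The new YTIllDefinedBounds guard turns silently broken log-spaced bins into an explicit error; a hedged usage sketch (the data source and radii are illustrative):

    sp = pf.h.sphere("c", 0.25)
    # a lower bound of 0.0 cannot be log-scaled, so this now raises
    # YTIllDefinedBounds instead of producing NaN bin edges
    prof = BinnedProfile1D(sp, 64, "Radius", 0.0, 0.25, log_space=True)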
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -194,7 +194,7 @@
ts = np.abs(ts)
self._dts[grid.id] = dts
self._ts[grid.id] = ts
- self._masks[grid.id] = masks
+ self._masks[grid.id] = mask
return mask
@@ -644,38 +644,6 @@
raise SyntaxError("Making a fixed resolution slice with "
"particles isn't supported yet.")
- def reslice(self, normal, center, width):
-
- # Cleanup
- del self._coord
- del self._pixelmask
-
- self.center = center
- self.width = width
- self.dds = self.width / self.dims
- self.set_field_parameter('center', center)
- self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
- self._d = -1.0 * np.dot(self._norm_vec, self.center)
- # First we try all three, see which has the best result:
- vecs = np.identity(3)
- _t = np.cross(self._norm_vec, vecs).sum(axis=1)
- ax = _t.argmax()
- self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
- self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
- self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
- self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
- self.set_field_parameter('cp_x_vec',self._x_vec)
- self.set_field_parameter('cp_y_vec',self._y_vec)
- self.set_field_parameter('cp_z_vec',self._norm_vec)
- # Calculate coordinates of each pixel
- _co = self.dds * \
- (np.mgrid[-self.dims/2 : self.dims/2,
- -self.dims/2 : self.dims/2] + 0.5)
-
- self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
- np.outer(_co[1,:,:], self._y_vec)
- self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
-
def get_data(self, fields):
"""
Iterates over the list of fields and generates/reads them all.
@@ -860,7 +828,6 @@
"""
_type_name = "region"
_con_args = ('center', 'left_edge', 'right_edge')
- _dx_pad = 0.5
def __init__(self, center, left_edge, right_edge, fields = None,
pf = None, **kwargs):
YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -8,7 +8,7 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('data_objects', parent_package, top_path)
+ config.add_subpackage("tests")
config.make_config_py() # installs __config__.py
- config.add_subpackage("tests")
#config.make_svn_version_py()
return config
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -86,8 +86,11 @@
if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
- _cached_pfs[apath] = obj
- return _cached_pfs[apath]
+ if obj._skip_cache is False:
+ _cached_pfs[apath] = obj
+ else:
+ obj = _cached_pfs[apath]
+ return obj
def __init__(self, filename, data_style=None, file_style=None):
"""
@@ -157,6 +160,10 @@
def _mrep(self):
return MinimalStaticOutput(self)
+ @property
+ def _skip_cache(self):
+ return False
+
def hub_upload(self):
self._mrep.upload()
@@ -261,6 +268,10 @@
raise YTGeometryNotSupported(self.geometry)
def add_particle_filter(self, filter):
+ # This is a dummy, which we set up to enable passthrough of "all"
+ # concatenation fields.
+ n = getattr(filter, "name", filter)
+ self.known_filters[n] = None
if isinstance(filter, types.StringTypes):
used = False
for f in filter_registry[filter]:
@@ -271,6 +282,7 @@
else:
used = self.h._setup_filtered_type(filter)
if not used:
+ self.known_filters.pop(n, None)
return False
self.known_filters[filter.name] = filter
return True
@@ -290,20 +302,25 @@
self._last_finfo = self.field_info[(ftype, fname)]
return self._last_finfo
if fname == self._last_freq[1]:
- mylog.debug("Guessing field %s is (%s, %s)", fname,
- self._last_freq[0], self._last_freq[1])
return self._last_finfo
if fname in self.field_info:
+ # Sometimes, if guessing_type == True, this will be switched for
+ # the type of field it is. So we look at the field type and
+ # determine if we need to change the type.
+ fi = self._last_finfo = self.field_info[fname]
+ if fi.particle_type and self._last_freq[0] \
+ not in self.particle_types:
+ field = "all", field[1]
+ elif not fi.particle_type and self._last_freq[0] \
+ not in self.fluid_types:
+ field = self.default_fluid_type, field[1]
self._last_freq = field
- self._last_finfo = self.field_info[fname]
return self._last_finfo
# We also should check "all" for particles, which can show up if you're
# mixing deposition/gas fields with particle fields.
if guessing_type and ("all", fname) in self.field_info:
self._last_freq = ("all", fname)
self._last_finfo = self.field_info["all", fname]
- mylog.debug("Guessing field %s is (%s, %s)", fname,
- "all", fname)
return self._last_finfo
raise YTFieldNotFound((ftype, fname), self)
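The new _skip_cache hook lets a frontend opt out of the parameter-file cache consulted in __new__ above; a minimal sketch of a (hypothetical) subclass using it:

    class InMemoryStaticOutput(StaticOutput):
        # hypothetical frontend whose instances should never come from _cached_pfs
        @property
        def _skip_cache(self):
            return True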
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -99,6 +99,7 @@
if fname.startswith("Overdensity"): continue
if FieldInfo[field].particle_type: continue
for nproc in [1, 4, 8]:
+ test_all_fields.__name__ = "%s_%s" % (field, nproc)
yield TestFieldAccess(field, nproc)
if __name__ == "__main__":
diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -753,6 +753,8 @@
rdw = radius.copy()
for i, ax in enumerate('xyz'):
np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+ if data.pf.dimensionality < i+1:
+ break
if data.pf.periodicity[i] == True:
np.abs(r, r)
np.subtract(r, DW[i], rdw)
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt-3.0/commits/74a519fdf95b/
Changeset: 74a519fdf95b
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-21 21:49:27
Summary: Rolling back removal of the masking functions.
Affected #: 2 files
diff -r 0849d317e494957569b94d9b982c492064c34483 -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -49,6 +49,8 @@
cdef oct_visitor_function count_total_octs
cdef oct_visitor_function count_total_cells
+cdef oct_visitor_function mark_octs
+cdef oct_visitor_function mask_octs
cdef oct_visitor_function index_octs
cdef oct_visitor_function icoords_octs
cdef oct_visitor_function ires_octs
diff -r 0849d317e494957569b94d9b982c492064c34483 -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -70,6 +70,25 @@
# Number of *cells* visited and selected.
data.index += selected
+cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+ # We mark them even if they are not selected
+ cdef int i
+ cdef np.uint8_t *arr = <np.uint8_t *> data.array
+ if data.last != o.domain_ind:
+ data.last = o.domain_ind
+ data.index += 1
+ cdef np.int64_t index = data.index * 8
+ index += oind(data)
+ arr[index] = 1
+
+cdef void mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+ if selected == 0: return
+ cdef int i
+ cdef np.uint8_t *arr = <np.uint8_t *> data.array
+ cdef np.int64_t index = data.global_index * 8
+ index += oind(data)
+ arr[index] = 1
+
cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
# Note that we provide an index even if the cell is not selected.
cdef int i
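The restored mask_octs visitor is what backs the oct handler's mask() method; a sketch of how a consumer such as select_blocks() uses it, with the default two zones per oct edge (8 cells per oct):

    mask = subset.oct_handler.mask(subset.selector)   # flat bool array, num_octs * 8
    mask = subset._reshape_vals(mask)                 # -> shape (2, 2, 2, num_octs)
    for i in range(mask.shape[-1]):
        n_selected = mask[:, :, :, i].sum()           # selected cells in oct i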
https://bitbucket.org/yt_analysis/yt-3.0/commits/f25eb47bca30/
Changeset: f25eb47bca30
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-21 21:57:00
Summary: Adding an over_refine_factor and abstracting num_zones somewhat. Added setup_data().
Affected #: 7 files
diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -42,7 +42,6 @@
class OctreeSubset(YTSelectionContainer):
_spatial = True
_num_ghost_zones = 0
- _num_zones = 2
_type_name = 'octree_subset'
_skip_add = True
_con_args = ('base_region', 'domain', 'pf')
@@ -50,7 +49,8 @@
_domain_offset = 0
_num_octs = -1
- def __init__(self, base_region, domain, pf):
+ def __init__(self, base_region, domain, pf, over_refine_factor = 1):
+ self._num_zones = 1 << (over_refine_factor)
self.field_data = YTFieldData()
self.field_parameters = {}
self.domain = domain
@@ -228,8 +228,10 @@
_type_name = 'indexed_octree_subset'
_con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
domain_id = -1
- def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+ def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
+ over_refine_factor = 2):
# The first attempt at this will not work in parallel.
+ self._num_zones = 1 << (over_refine_factor)
self.data_files = data_files
self.field_data = YTFieldData()
self.field_parameters = {}
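With _num_zones now derived from the constructor argument, the zone count per oct scales as in this small sketch:

    for over_refine_factor in (1, 2, 3):
        num_zones = 1 << over_refine_factor    # zones per oct edge: 2, 4, 8
        cells_per_oct = num_zones ** 3         # 8, 64, 512
        print over_refine_factor, num_zones, cells_per_oct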
diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -68,6 +68,7 @@
cdef oct_visitor_function *fill_func
cdef int partial_coverage
cdef int nn[3]
+ cdef np.uint8_t oref
cdef np.float64_t DLE[3], DRE[3]
cdef public np.int64_t nocts
cdef public int max_domain
@@ -83,6 +84,7 @@
OctVisitorData *data)
cdef Oct *next_root(self, int domain_id, int ind[3])
cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
+ cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
cdef class SparseOctreeContainer(OctreeContainer):
cdef OctKey *root_nodes
diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -96,8 +96,10 @@
cdef class OctreeContainer:
def __init__(self, oct_domain_dimensions, domain_left_edge,
- domain_right_edge, partial_coverage = 0):
+ domain_right_edge, partial_coverage = 0,
+ over_refine = 1):
# This will just initialize the root mesh octs
+ self.oref = over_refine
self.partial_coverage = partial_coverage
cdef int i, j, k, p
for i in range(3):
@@ -120,6 +122,12 @@
for k in range(self.nn[2]):
self.root_mesh[i][j][k] = NULL
+ cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
+ data.index = 0
+ data.last = -1
+ data.domain = domain_id
+ data.oref = self.oref
+
def __dealloc__(self):
free_octs(self.cont)
if self.root_mesh == NULL: return
@@ -251,8 +259,8 @@
cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
domain_mask = np.zeros(self.max_domain, dtype="uint8")
cdef OctVisitorData data
+ self.setup_data(&data)
data.array = domain_mask.data
- data.domain = -1
self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
cdef int i
domain_ids = []
@@ -335,9 +343,8 @@
cdef np.ndarray[np.uint8_t, ndim=1] coords
coords = np.zeros((num_octs * 8), dtype="uint8")
cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
data.array = <void *> coords.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
return coords.astype("bool")
@@ -352,9 +359,8 @@
# TODO: This *8 needs to be made generic.
coords = np.empty((num_octs * 8, 3), dtype="int64")
cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
data.array = <void *> coords.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
return coords
@@ -370,9 +376,8 @@
# TODO: This *8 needs to be made generic.
res = np.empty(num_octs * 8, dtype="int64")
cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
data.array = <void *> res.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
return res
@@ -387,9 +392,8 @@
# TODO: This *8 needs to be made generic.
fwidth = np.empty((num_octs * 8, 3), dtype="float64")
cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
data.array = <void *> fwidth.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
cdef np.float64_t base_dx
for i in range(3):
@@ -409,9 +413,8 @@
# TODO: This *8 needs to be made generic.
coords = np.empty((num_octs * 8, 3), dtype="float64")
cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
data.array = <void *> coords.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
cdef int i
cdef np.float64_t base_dx
@@ -441,8 +444,8 @@
else:
dest = np.zeros(num_cells, dtype=source.dtype, order='C')
cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
data.index = offset
- data.domain = domain_id
# We only need this so we can continue calculating the offset
data.dims = dims
cdef void *p[2]
@@ -479,10 +482,8 @@
# Here's where we grab the masked items.
ind = np.zeros(self.nocts, 'int64') - 1
cdef OctVisitorData data
- data.domain = domain_id
+ self.setup_data(&data, domain_id)
data.array = ind.data
- data.index = 0
- data.last = -1
self.visit_all_octs(selector, oct_visitors.index_octs, &data)
return ind
@@ -595,13 +596,12 @@
file_inds[i] = -1
cell_inds[i] = 9
cdef OctVisitorData data
- data.index = 0
+ self.setup_data(&data, domain_id)
cdef void *p[3]
p[0] = levels.data
p[1] = file_inds.data
p[2] = cell_inds.data
data.array = p
- data.domain = domain_id
self.visit_all_octs(selector, self.fill_func, &data)
return levels, cell_inds, file_inds
@@ -629,8 +629,7 @@
def finalize(self):
cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
cdef OctVisitorData data
- data.index = 0
- data.domain = 1
+ self.setup_data(&data, 1)
self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
# TODO: This *8 needs to be made generic.
assert ((data.global_index+1)*8 == data.index)
@@ -648,9 +647,11 @@
cdef class SparseOctreeContainer(OctreeContainer):
- def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
+ def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge,
+ over_refine = 1):
cdef int i, j, k, p
self.partial_coverage = 1
+ self.oref = over_refine
for i in range(3):
self.nn[i] = domain_dimensions[i]
self.max_domain = -1
diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -43,6 +43,9 @@
int dims
np.int32_t domain
np.int8_t level
+ np.int8_t oref # This is the level of overref. 1 => 8 zones, 2 => 64, etc.
+ # To calculate nzones, 1 << (oref * 3)
+
ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
np.uint8_t selected)
diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -77,6 +77,7 @@
if data.last != o.domain_ind:
data.last = o.domain_ind
data.index += 1
+ # TODO: This 8 needs to be made into a generic value.
cdef np.int64_t index = data.index * 8
index += oind(data)
arr[index] = 1
@@ -85,6 +86,7 @@
if selected == 0: return
cdef int i
cdef np.uint8_t *arr = <np.uint8_t *> data.array
+ # TODO: This 8 needs to be made into a generic value.
cdef np.int64_t index = data.global_index * 8
index += oind(data)
arr[index] = 1
diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -205,6 +205,7 @@
cdef int i, j, k, m, n, ind[3]
cdef Oct *noct
cdef np.uint64_t prefix1, prefix2
+ # TODO: This does not need to be changed.
o.children = <Oct **> malloc(sizeof(Oct *)*8)
for i in range(2):
for j in range(2):
diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -155,16 +155,13 @@
def count_octs(self, OctreeContainer octree, int domain_id = -1):
cdef OctVisitorData data
- data.index = 0
- data.last = -1
- data.domain = domain_id
+ octree.setup_data(&data, domain_id)
octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
return data.index
def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
cdef OctVisitorData data
- data.index = 0
- data.domain = domain_id
+ octree.setup_data(&data, domain_id)
octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
return data.index
https://bitbucket.org/yt_analysis/yt-3.0/commits/c37cad5b6b0a/
Changeset: c37cad5b6b0a
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-21 23:02:46
Summary: First pass at generalization of cell count in octs.
Affected #: 6 files
diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -229,7 +229,7 @@
_con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
domain_id = -1
def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
- over_refine_factor = 2):
+ over_refine_factor = 1):
# The first attempt at this will not work in parallel.
self._num_zones = 1 << (over_refine_factor)
self.data_files = data_files
diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -127,6 +127,7 @@
data.last = -1
data.domain = domain_id
data.oref = self.oref
+ data.nz = (1 << (data.oref*3))
def __dealloc__(self):
free_octs(self.cont)
@@ -341,9 +342,9 @@
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
cdef np.ndarray[np.uint8_t, ndim=1] coords
- coords = np.zeros((num_octs * 8), dtype="uint8")
cdef OctVisitorData data
self.setup_data(&data, domain_id)
+ coords = np.zeros((num_octs * data.nz), dtype="uint8")
data.array = <void *> coords.data
self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
return coords.astype("bool")
@@ -355,11 +356,10 @@
int domain_id = -1):
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
- cdef np.ndarray[np.int64_t, ndim=2] coords
- # TODO: This *8 needs to be made generic.
- coords = np.empty((num_octs * 8, 3), dtype="int64")
cdef OctVisitorData data
self.setup_data(&data, domain_id)
+ cdef np.ndarray[np.int64_t, ndim=2] coords
+ coords = np.empty((num_octs * data.nz, 3), dtype="int64")
data.array = <void *> coords.data
self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
return coords
@@ -371,12 +371,11 @@
int domain_id = -1):
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
+ cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
#Return the 'resolution' of each cell; ie the level
cdef np.ndarray[np.int64_t, ndim=1] res
- # TODO: This *8 needs to be made generic.
- res = np.empty(num_octs * 8, dtype="int64")
- cdef OctVisitorData data
- self.setup_data(&data, domain_id)
+ res = np.empty(num_octs * data.nz, dtype="int64")
data.array = <void *> res.data
self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
return res
@@ -388,11 +387,10 @@
int domain_id = -1):
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
- cdef np.ndarray[np.float64_t, ndim=2] fwidth
- # TODO: This *8 needs to be made generic.
- fwidth = np.empty((num_octs * 8, 3), dtype="float64")
cdef OctVisitorData data
self.setup_data(&data, domain_id)
+ cdef np.ndarray[np.float64_t, ndim=2] fwidth
+ fwidth = np.empty((num_octs * data.nz, 3), dtype="float64")
data.array = <void *> fwidth.data
self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
cdef np.float64_t base_dx
@@ -408,12 +406,11 @@
int domain_id = -1):
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
+ cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
#Return the floating point unitary position of every cell
cdef np.ndarray[np.float64_t, ndim=2] coords
- # TODO: This *8 needs to be made generic.
- coords = np.empty((num_octs * 8, 3), dtype="float64")
- cdef OctVisitorData data
- self.setup_data(&data, domain_id)
+ coords = np.empty((num_octs * data.nz, 3), dtype="float64")
data.array = <void *> coords.data
self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
cdef int i
@@ -462,11 +459,9 @@
else:
raise NotImplementedError
self.visit_all_octs(selector, func, &data)
- # TODO: This *8 needs to be made generic.
- if (data.global_index + 1) * 8 * data.dims > source.size:
+ if (data.global_index + 1) * data.nz * data.dims > source.size:
print "GLOBAL INDEX RAN AHEAD.",
- # TODO: This *8 needs to be made generic.
- print (data.global_index + 1) * 8 * data.dims - source.size
+ print (data.global_index + 1) * data.nz * data.dims - source.size
print dest.size, source.size, num_cells
raise RuntimeError
if data.index > dest.size:
@@ -566,7 +561,7 @@
if parent.children != NULL:
next = parent.children[cind(ind[0],ind[1],ind[2])]
else:
- # TODO: This *8 does NOT need to be made generic.
+ # This *8 does NOT need to be made generic.
parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
for i in range(8):
parent.children[i] = NULL
@@ -631,8 +626,7 @@
cdef OctVisitorData data
self.setup_data(&data, 1)
self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
- # TODO: This *8 needs to be made generic.
- assert ((data.global_index+1)*8 == data.index)
+ assert ((data.global_index+1)*data.nz == data.index)
cdef int root_node_compare(void *a, void *b) nogil:
cdef OctKey *ao, *bo
diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -45,6 +45,7 @@
np.int8_t level
np.int8_t oref # This is the level of overref. 1 => 8 zones, 2 => 64, etc.
# To calculate nzones, 1 << (oref * 3)
+ np.int32_t nz
ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
@@ -67,10 +68,15 @@
cdef oct_visitor_function fill_file_indices_rind
cdef inline int cind(int i, int j, int k):
+ # THIS ONLY WORKS FOR CHILDREN. It is not general for zones.
return (((i*2)+j)*2+k)
cdef inline int oind(OctVisitorData *data):
- return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
+ return (((data.ind[0]*(1<<data.oref))
+ +data.ind[1])*(1<<data.oref)
+ +data.ind[2])
cdef inline int rind(OctVisitorData *data):
- return (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])
+ return (((data.ind[2]*(1<<data.oref))
+ +data.ind[1])*(1<<data.oref)
+ +data.ind[0])
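The generalized oind()/rind() just index a (1 << oref)^3 cube in forward and reversed axis order; a pure-Python sketch of the same arithmetic:

    def oind(ind, oref=1):
        d = 1 << oref
        i, j, k = ind
        return ((i * d) + j) * d + k     # matches the Cython oind() above

    def rind(ind, oref=1):
        d = 1 << oref
        i, j, k = ind
        return ((k * d) + j) * d + i     # reversed-order variant

    assert oind((1, 1, 1)) == 7          # last of the 8 zones when oref = 1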
diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,8 +38,7 @@
if selected == 0: return
cdef int i
# There are this many records between "octs"
- # TODO: This 8 needs to be made into a generic value.
- cdef np.int64_t index = (data.global_index * 8)*data.dims
+ cdef np.int64_t index = (data.global_index * data.nz)*data.dims
cdef np.float64_t **p = <np.float64_t**> data.array
index += oind(data)*data.dims
for i in range(data.dims):
@@ -51,8 +50,7 @@
# "last" here tells us the dimensionality of the array.
if selected == 0: return
cdef int i
- # TODO: This 8 needs to be made into a generic value.
- cdef np.int64_t index = (data.global_index * 8)*data.dims
+ cdef np.int64_t index = (data.global_index * data.nz)*data.dims
cdef np.int64_t **p = <np.int64_t**> data.array
index += oind(data)*data.dims
for i in range(data.dims):
@@ -77,8 +75,7 @@
if data.last != o.domain_ind:
data.last = o.domain_ind
data.index += 1
- # TODO: This 8 needs to be made into a generic value.
- cdef np.int64_t index = data.index * 8
+ cdef np.int64_t index = data.index * data.nz
index += oind(data)
arr[index] = 1
@@ -86,8 +83,7 @@
if selected == 0: return
cdef int i
cdef np.uint8_t *arr = <np.uint8_t *> data.array
- # TODO: This 8 needs to be made into a generic value.
- cdef np.int64_t index = data.global_index * 8
+ cdef np.int64_t index = data.global_index * data.nz
index += oind(data)
arr[index] = 1
@@ -105,10 +101,8 @@
if selected == 0: return
cdef np.int64_t *coords = <np.int64_t*> data.array
cdef int i
- # TODO: data.ind and the number of bits we shift need to be made general
- # for octrees with > 8 zones.
for i in range(3):
- coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
+ coords[data.index * 3 + i] = (data.pos[i] << data.oref) + data.ind[i]
data.index += 1
cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -126,11 +120,9 @@
cdef np.float64_t *fcoords = <np.float64_t*> data.array
cdef int i
cdef np.float64_t c, dx
- # TODO: data.ind and the number of bits we shift in dx and in data.pos need
- # to be made general for octrees with > 8 zones.
- dx = 1.0 / (2 << data.level)
+ dx = 1.0 / ((1 << data.oref) << data.level)
for i in range(3):
- c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i])
+ c = <np.float64_t> ((data.pos[i] << data.oref ) + data.ind[i])
fcoords[data.index * 3 + i] = (c + 0.5) * dx
data.index += 1
@@ -143,7 +135,7 @@
cdef np.float64_t *fwidth = <np.float64_t*> data.array
cdef int i
cdef np.float64_t dx
- dx = 1.0 / (2 << data.level)
+ dx = 1.0 / ((1 << data.oref) << data.level)
for i in range(3):
fwidth[data.index * 3 + i] = dx
data.index += 1
diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -40,6 +40,9 @@
oct_visitor_function *func,
OctVisitorData *data,
int visit_covered = ?)
+ cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+ np.float64_t spos[3], np.float64_t sdds[3],
+ oct_visitor_function *func, int i, int j, int k)
cdef int select_grid(self, np.float64_t left_edge[3],
np.float64_t right_edge[3],
np.int32_t level, Oct *o = ?) nogil
diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -241,25 +241,59 @@
data.pos[2] = (data.pos[2] >> 1)
data.level -= 1
elif this_level == 1:
- # TODO: Refactor to enable multiple cells
- # This code should be able to iterate over
- # cells, even though the rest cannot.
- selected = self.select_cell(spos, sdds)
- if ch != NULL:
- selected *= self.overlap_cells
data.global_index += increment
increment = 0
- # data.ind refers to the cell, not to the oct.
- data.ind[0] = i
- data.ind[1] = j
- data.ind[2] = k
- func(root, data, selected)
+ self.visit_oct_cells(data, root, ch, spos, sdds,
+ func, i, j, k)
spos[2] += sdds[2]
spos[1] += sdds[1]
spos[0] += sdds[0]
this_level = 0 # We turn this off for the second pass.
iter += 1
+ cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+ np.float64_t spos[3], np.float64_t sdds[3],
+ oct_visitor_function *func, int i, int j, int k):
+ # We can short-circuit the whole process if data.oref == 1.
+ # This saves us some funny-business.
+ cdef int selected
+ if data.oref == 1:
+ selected = self.select_cell(spos, sdds)
+ if ch != NULL:
+ selected *= self.overlap_cells
+ # data.ind refers to the cell, not to the oct.
+ data.ind[0] = i
+ data.ind[1] = j
+ data.ind[2] = k
+ func(root, data, selected)
+ return
+ # Okay, now that we've got that out of the way, we have to do some
+ # other checks here. In this case, spos[] is the position of the
+ # center of a *possible* oct child, which means it is the center of a
+ # cluster of cells. That cluster might have 1, 8, 64, ... cells in it.
+ # But, we can figure it out by calculating the cell dds.
+ cdef np.float64_t dds[3], pos[3]
+ cdef int ci, cj, ck
+ for i in range(3):
+ dds[i] = sdds[i] / data.oref
+ # Boot strap at the first index.
+ pos[0] = (spos[0] - sdds[0]/2.0) + dds[0] * 0.5
+ for ci in range(data.oref):
+ pos[1] = (spos[1] - sdds[1]/2.0) + dds[1] * 0.5
+ for cj in range(data.oref):
+ pos[2] = (spos[2] - sdds[2]/2.0) + dds[2] * 0.5
+ for ck in range(data.oref):
+ selected = self.select_cell(pos, dds)
+ if ch != NULL:
+ selected *= self.overlap_cells
+ data.ind[0] = i
+ data.ind[1] = j
+ data.ind[2] = k
+ pos[2] += dds[2]
+ func(root, data, selected)
+ pos[1] += dds[1]
+ pos[0] += dds[0]
+
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
https://bitbucket.org/yt_analysis/yt-3.0/commits/53eafdeb8919/
Changeset: 53eafdeb8919
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-21 23:14:27
Summary: Starting to thread over_refine_factor through constructors.
Affected #: 3 files
diff -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,6 +96,7 @@
class ParticleStaticOutput(StaticOutput):
_unit_base = None
+ over_refine_factor = 1
def _set_units(self):
self.units = {}
@@ -154,8 +155,10 @@
def __init__(self, filename, data_style="gadget_binary",
additional_fields = (),
- unit_base = None, n_ref = 64):
+ unit_base = None, n_ref = 64,
+ over_refine_factor = 1):
self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
@@ -268,11 +271,13 @@
_particle_coordinates_name = "Coordinates"
_header_spec = None # Override so that there's no confusion
- def __init__(self, filename, data_style="OWLS", n_ref = 64):
+ def __init__(self, filename, data_style="OWLS", n_ref = 64,
+ over_refine_factor = 1):
self.storage_filename = None
- super(OWLSStaticOutput, self).__init__(filename, data_style,
- unit_base = None,
- n_ref = n_ref)
+ super(OWLSStaticOutput, self).__init__(
+ filename, data_style,
+ unit_base = None, n_ref = n_ref,
+ over_refine_factor = over_refine_factor)
def __repr__(self):
return os.path.basename(self.parameter_filename).split(".")[0]
diff -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -738,10 +738,11 @@
file_count = 1
filename_template = "stream_file"
n_ref = 64
+ over_refine_factor = 1
def load_particles(data, sim_unit_to_cm, bbox=None,
sim_time=0.0, periodicity=(True, True, True),
- n_ref = 64):
+ n_ref = 64, over_refine_factor = 1):
r"""Load a set of particles into yt as a
:class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
@@ -828,6 +829,7 @@
spf = StreamParticlesStaticOutput(handler)
spf.n_ref = n_ref
+ spf.over_refine_factor = over_refine_factor
spf.units["cm"] = sim_unit_to_cm
spf.units['1'] = 1.0
spf.units["unitary"] = 1.0
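A hedged usage sketch of the new over_refine_factor keyword on load_particles; the import location, field names, and numbers are assumptions for illustration, not taken from this diff:

    import numpy as np
    from yt.frontends.stream.api import load_particles   # assumed import path

    n = 10000
    data = {"particle_position_x": np.random.random(n),
            "particle_position_y": np.random.random(n),
            "particle_position_z": np.random.random(n),
            "particle_mass": np.ones(n)}
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    spf = load_particles(data, 3.08e24, bbox=bbox,
                         n_ref=64, over_refine_factor=2)  # 4^3 zones per oct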
diff -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -86,7 +86,8 @@
sum(d.total_particles.values()) for d in self.data_files)
pf = self.parameter_file
self.oct_handler = ParticleOctreeContainer(
- [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
+ [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge,
+ over_refine = pf.over_refine_factor)
self.oct_handler.n_ref = pf.n_ref
mylog.info("Allocating for %0.3e particles", self.total_particles)
# No more than 256^3 in the region finder.
@@ -148,7 +149,8 @@
self.regions.identify_data_files(dobj.selector)]
base_region = getattr(dobj, "base_region", dobj)
subset = [ParticleOctreeSubset(base_region, data_files,
- self.parameter_file)]
+ self.parameter_file,
+ self.parameter_file.over_refine_factor)]
dobj._chunk_info = subset
dobj._current_chunk = list(self._chunk_all(dobj))[0]
https://bitbucket.org/yt_analysis/yt-3.0/commits/e9032910ce34/
Changeset: e9032910ce34
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-21 23:27:47
Summary: Initial implementation of the smoothing with over_refine.
Works for over_refine = 1, but not 2 or higher.
http://paste.yt-project.org/show/3797/
Affected #: 4 files
diff -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 -r e9032910ce34ce257354ff126a0d2b6f94979d48 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -151,7 +151,8 @@
cls = getattr(particle_smooth, "%s_smooth" % method, None)
if cls is None:
raise YTParticleDepositionNotImplemented(method)
- nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
+ nz = self.nz
+ nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
if fields is None: fields = []
op = cls(nvals, len(fields), 64)
op.initialize()
diff -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 -r e9032910ce34ce257354ff126a0d2b6f94979d48 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -248,9 +248,11 @@
cp[i] -= dds[i]/2.0 # Now centered
else:
cp[i] += dds[i]/2.0
- # We don't need to change dds[i] as it has been halved from the
- # oct width, thus making it already the cell width
- oinfo.dds[i] = dds[i] # Cell width
+ # We don't normally need to change dds[i] as it has been halved
+ # from the oct width, thus making it already the cell width.
+ # But, for some cases where the oref != 1, this needs to be
+ # changed.
+ oinfo.dds[i] = dds[i] / self.oref # Cell width
oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
oinfo.ipos[i] = ipos[i]
oinfo.level = level
diff -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 -r e9032910ce34ce257354ff126a0d2b6f94979d48 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -107,7 +107,8 @@
cdef np.int64_t *doffs, *pinds, *pcounts, poff
cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
cdef np.ndarray[np.float64_t, ndim=1] tarr
- dims[0] = dims[1] = dims[2] = 2
+ dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+ cdef int nz = dims[0] * dims[1] * dims[2]
numpart = positions.shape[0]
# pcount is the number of particles per oct.
pcount = np.zeros_like(dom_ind)
@@ -173,7 +174,7 @@
oct = octree.get(pos, &oi)
if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
continue
- offset = dom_ind[oct.domain_ind - moff] * 8
+ offset = dom_ind[oct.domain_ind - moff] * nz
neighbors = octree.neighbors(&oi, &nneighbors)
# Now we have all our neighbors. And, we should be set for what
# else we need to do.
diff -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 -r e9032910ce34ce257354ff126a0d2b6f94979d48 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -286,9 +286,9 @@
selected = self.select_cell(pos, dds)
if ch != NULL:
selected *= self.overlap_cells
- data.ind[0] = i
- data.ind[1] = j
- data.ind[2] = k
+ data.ind[0] = ci
+ data.ind[1] = cj
+ data.ind[2] = ck
pos[2] += dds[2]
func(root, data, selected)
pos[1] += dds[1]
https://bitbucket.org/yt_analysis/yt-3.0/commits/d1c15c019b54/
Changeset: d1c15c019b54
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-21 23:40:36
Summary: Fixing domain_dimensions to match number of zones.
Affected #: 1 file
diff -r e9032910ce34ce257354ff126a0d2b6f94979d48 -r d1c15c019b54a2cd720f378896cb6f069778acce yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -191,7 +191,8 @@
self.domain_left_edge = np.zeros(3, "float64")
self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
- self.domain_dimensions = np.ones(3, "int32") * 2
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
self.periodicity = (True, True, True)
self.cosmological_simulation = 1
@@ -297,7 +298,8 @@
self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
self.domain_left_edge = np.zeros(3, "float64")
self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
- self.domain_dimensions = np.ones(3, "int32") * 2
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
self.cosmological_simulation = 1
self.periodicity = (True, True, True)
self.current_redshift = hvals["Redshift"]
@@ -443,7 +445,8 @@
self.parameters[param] = val
self.current_time = hvals["time"]
- self.domain_dimensions = np.ones(3, "int32") * 2
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
if self.parameters.get('bPeriodic', True):
self.periodicity = (True, True, True)
else:
https://bitbucket.org/yt_analysis/yt-3.0/commits/b282b6c89e67/
Changeset: b282b6c89e67
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-22 19:53:05
Summary: Order of arguments fixed, also fixing cell/oct width in oi.
Affected #: 3 files
diff -r d1c15c019b54a2cd720f378896cb6f069778acce -r b282b6c89e67388f40f1a970b1e375757eae5f4d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -291,8 +291,10 @@
cdef np.int64_t npos[3], ndim[3]
# Now we get our boundaries for this level, so that we can wrap around
# if need be.
+ # ndim is the oct dimensions of the level, not the cell dimensions.
for i in range(3):
- ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i])/(2*oi.dds[i]))
+ ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+ ndim[i] = (ndim[i] >> self.oref)
for i in range(3):
npos[0] = (oi.ipos[0] + (1 - i))
if npos[0] < 0: npos[0] += ndim[0]
@@ -325,6 +327,7 @@
nfound += 1
olist = OctList_append(olist, cand)
if my_list == NULL: my_list = olist
+
olist = my_list
cdef int noct = OctList_count(olist)
cdef Oct **neighbors
diff -r d1c15c019b54a2cd720f378896cb6f069778acce -r b282b6c89e67388f40f1a970b1e375757eae5f4d yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -148,9 +148,9 @@
data_files = [self.data_files[i] for i in
self.regions.identify_data_files(dobj.selector)]
base_region = getattr(dobj, "base_region", dobj)
+ oref = self.parameter_file.over_refine_factor
subset = [ParticleOctreeSubset(base_region, data_files,
- self.parameter_file,
- self.parameter_file.over_refine_factor)]
+ self.parameter_file, over_refine_factor = oref)]
dobj._chunk_info = subset
dobj._current_chunk = list(self._chunk_all(dobj))[0]
diff -r d1c15c019b54a2cd720f378896cb6f069778acce -r b282b6c89e67388f40f1a970b1e375757eae5f4d yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -98,7 +98,7 @@
# that we can deal with >27 neighbors. As I write this comment,
# neighbors() only returns 27 neighbors.
cdef int nf, i, j, dims[3], n
- cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos
+ cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
cdef int nsize = 0
cdef np.int64_t *nind = NULL
cdef OctInfo oi
@@ -144,6 +144,7 @@
# actually be indirectly-sorted fields. This preserves memory at the
# expense of additional pointer lookups.
pind = np.argsort(pdoms)
+ pind = np.asarray(pind, dtype='int64', order='C')
# So what this means is that we now have all the oct-0 particle indices
# in order, then the oct-1, etc etc.
# This now gives us the indices to the particles for each domain.
@@ -176,6 +177,8 @@
continue
offset = dom_ind[oct.domain_ind - moff] * nz
neighbors = octree.neighbors(&oi, &nneighbors)
+ for j in range(3):
+ dds[j] = oi.dds[j] / octree.oref
# Now we have all our neighbors. And, we should be set for what
# else we need to do.
if nneighbors > nsize:
@@ -190,7 +193,7 @@
break
# This is allocated by the neighbors function, so we deallocate it.
free(neighbors)
- self.neighbor_process(dims, oi.left_edge, oi.dds,
+ self.neighbor_process(dims, oi.left_edge, dds,
ppos, field_pointers, nneighbors, nind, doffs,
pinds, pcounts, offset)
if nind != NULL:
@@ -332,6 +335,9 @@
free(self.fp)
return self.vals
+ @cython.cdivision(True)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
cdef void process(self, np.int64_t offset, int i, int j, int k,
int dim[3], np.float64_t cpos[3], np.float64_t **fields):
# We have our i, j, k for our cell, as well as the cell position.
https://bitbucket.org/yt_analysis/yt-3.0/commits/e36b3ff3f5d5/
Changeset: e36b3ff3f5d5
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-22 21:17:13
Summary: Fixing typo in setting of dds. Over refine now preliminarily works.
Affected #: 2 files
diff -r b282b6c89e67388f40f1a970b1e375757eae5f4d -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -123,9 +123,17 @@
self.root_mesh[i][j][k] = NULL
cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
+ cdef int i
data.index = 0
data.last = -1
+ data.global_index = -1
+ for i in range(3):
+ data.pos[i] = -1
+ data.ind[i] = -1
+ data.array = NULL
+ data.dims = 0
data.domain = domain_id
+ data.level = -1
data.oref = self.oref
data.nz = (1 << (data.oref*3))
@@ -472,6 +480,8 @@
if data.index > dest.size:
print "DEST INDEX RAN AHEAD.",
print data.index - dest.size
+ print (data.global_index + 1) * data.nz * data.dims, source.size
+ print num_cells
raise RuntimeError
if num_cells >= 0:
return dest
diff -r b282b6c89e67388f40f1a970b1e375757eae5f4d -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -274,23 +274,24 @@
# But, we can figure it out by calculating the cell dds.
cdef np.float64_t dds[3], pos[3]
cdef int ci, cj, ck
- for i in range(3):
- dds[i] = sdds[i] / data.oref
+ cdef int nr = (1 << (data.oref - 1))
+ for ci in range(3):
+ dds[ci] = sdds[ci] / nr
# Boot strap at the first index.
pos[0] = (spos[0] - sdds[0]/2.0) + dds[0] * 0.5
- for ci in range(data.oref):
+ for ci in range(nr):
pos[1] = (spos[1] - sdds[1]/2.0) + dds[1] * 0.5
- for cj in range(data.oref):
+ for cj in range(nr):
pos[2] = (spos[2] - sdds[2]/2.0) + dds[2] * 0.5
- for ck in range(data.oref):
+ for ck in range(nr):
selected = self.select_cell(pos, dds)
if ch != NULL:
selected *= self.overlap_cells
- data.ind[0] = ci
- data.ind[1] = cj
- data.ind[2] = ck
+ data.ind[0] = ci + i * nr
+ data.ind[1] = cj + j * nr
+ data.ind[2] = ck + k * nr
+ func(root, data, selected)
pos[2] += dds[2]
- func(root, data, selected)
pos[1] += dds[1]
pos[0] += dds[0]
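The composite index written into data.ind above combines the child-cell loop index (i, j, k) with the sub-cell loop index (ci, cj, ck); the same arithmetic in a small sketch, assuming nr = 1 << (oref - 1) sub-cells per child cell along each axis:

    def composite_cell_index(child, sub, oref):
        # child: 0 or 1 (which half of the oct along this axis)
        # sub:   0 .. nr-1 (position within that half)
        nr = 1 << (oref - 1)
        return sub + child * nr   # spans 0 .. 2*nr - 1 == 0 .. (1 << oref) - 1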
https://bitbucket.org/yt_analysis/yt-3.0/commits/7830ab86ae14/
Changeset: 7830ab86ae14
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-23 02:15:23
Summary: Fixes for Oct dds and making a few things more clear.
Affected #: 4 files
diff -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -260,7 +260,7 @@
# from the oct width, thus making it already the cell width.
# But, for some cases where the oref != 1, this needs to be
# changed.
- oinfo.dds[i] = dds[i] / self.oref # Cell width
+ oinfo.dds[i] = dds[i] / (1 << (self.oref-1)) # Cell width
oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
oinfo.ipos[i] = ipos[i]
oinfo.level = level
diff -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -72,11 +72,9 @@
return (((i*2)+j)*2+k)
cdef inline int oind(OctVisitorData *data):
- return (((data.ind[0]*(1<<data.oref))
- +data.ind[1])*(1<<data.oref)
- +data.ind[2])
+ cdef int d = (1 << data.oref)
+ return (((data.ind[0]*d)+data.ind[1])*d+data.ind[2])
cdef inline int rind(OctVisitorData *data):
- return (((data.ind[2]*(1<<data.oref))
- +data.ind[1])*(1<<data.oref)
- +data.ind[0])
+ cdef int d = (1 << data.oref)
+ return (((data.ind[2]*d)+data.ind[1])*d+data.ind[0])
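The refactored oind()/rind() are just row-major and column-major flattening of a (d, d, d) cell index with d = 1 << oref; the same thing in plain Python, for reference:

    def oind(ind, oref):
        d = 1 << oref
        # C (row-major) ordering: the last index varies fastest
        return (ind[0] * d + ind[1]) * d + ind[2]

    def rind(ind, oref):
        d = 1 << oref
        # Fortran (column-major) ordering: the first index varies fastest
        return (ind[2] * d + ind[1]) * d + ind[0]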
diff -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -65,7 +65,8 @@
tarr = fields[i]
field_pointers[i] = <np.float64_t *> tarr.data
cdef int dims[3]
- dims[0] = dims[1] = dims[2] = 2
+ dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+ cdef int nz = dims[0] * dims[1] * dims[2]
cdef OctInfo oi
cdef np.int64_t offset, moff
cdef Oct *oct
@@ -97,7 +98,7 @@
if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
continue
# Note that this has to be our local index, not our in-file index.
- offset = dom_ind[oct.domain_ind - moff] * 8
+ offset = dom_ind[oct.domain_ind - moff] * nz
if offset < 0: continue
# Check that we found the oct ...
self.process(dims, oi.left_edge, oi.dds,
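The offset change above implies that each oct owns nz = (1 << oref)**3 consecutive slots in the destination buffer; a hypothetical helper (not yt code) showing where a given cell of a given local oct lands under that layout:

    def cell_slot(local_oct, i, j, k, oref):
        d = 1 << oref
        nz = d * d * d                       # matches nz = dims[0]*dims[1]*dims[2]
        offset = local_oct * nz              # matches dom_ind[...] * nz
        return offset + (i * d + j) * d + k  # row-major cell within the oct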
diff -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -177,8 +177,6 @@
continue
offset = dom_ind[oct.domain_ind - moff] * nz
neighbors = octree.neighbors(&oi, &nneighbors)
- for j in range(3):
- dds[j] = oi.dds[j] / octree.oref
# Now we have all our neighbors. And, we should be set for what
# else we need to do.
if nneighbors > nsize:
@@ -193,7 +191,7 @@
break
# This is allocated by the neighbors function, so we deallocate it.
free(neighbors)
- self.neighbor_process(dims, oi.left_edge, dds,
+ self.neighbor_process(dims, oi.left_edge, oi.dds,
ppos, field_pointers, nneighbors, nind, doffs,
pinds, pcounts, offset)
if nind != NULL:
https://bitbucket.org/yt_analysis/yt-3.0/commits/34ebb7b6fabd/
Changeset: 34ebb7b6fabd
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-28 20:26:33
Summary: Merging from tip of yt-3.0
Affected #: 26 files
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -832,8 +832,8 @@
echo "Building BLAS"
cd BLAS
gfortran -O2 -fPIC -fno-second-underscore -c *.f
- ar r libfblas.a *.o &>> ${LOG_FILE}
- ranlib libfblas.a 1>> ${LOG_FILE}
+ ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+ ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
rm -rf *.o
touch done
cd ..
@@ -844,7 +844,7 @@
echo "Building LAPACK"
cd $LAPACK/
cp INSTALL/make.inc.gfortran make.inc
- make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+ ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
fi
@@ -943,10 +943,10 @@
touch done
cd $MY_PWD
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
then
echo "Installing pure-python readline"
- ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+ ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
fi
if [ $INST_ENZO -eq 1 ]
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1062,8 +1062,9 @@
def __init__(self, data_source, dm_only=True, redshift=-1):
"""
Run hop on *data_source* with a given density *threshold*. If
- *dm_only* is True (default), only run it on the dark matter particles, otherwise
- on all particles. Returns an iterable collection of *HopGroup* items.
+ *dm_only* is True (default), only run it on the dark matter particles,
+ otherwise on all particles. Returns an iterable collection of
+ *HopGroup* items.
"""
self._data_source = data_source
self.dm_only = dm_only
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -1,5 +1,6 @@
from yt.testing import *
import os
+import tempfile
def setup():
from yt.config import ytcfg
@@ -7,7 +8,10 @@
def teardown_func(fns):
for fn in fns:
- os.remove(fn)
+ try:
+ os.remove(fn)
+ except OSError:
+ pass
def test_cutting_plane():
for nprocs in [8, 1]:
@@ -23,7 +27,9 @@
yield assert_equal, cut["Ones"].min(), 1.0
yield assert_equal, cut["Ones"].max(), 1.0
pw = cut.to_pw()
- fns += pw.save()
+ tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+ os.close(tmpfd)
+ fns += pw.save(name=tmpname)
frb = cut.to_frb((1.0,'unitary'), 64)
for cut_field in ['Ones', 'Density']:
yield assert_equal, frb[cut_field].info['data_source'], \
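The mkstemp pattern adopted in these tests, shown standalone: plot output goes to a unique temporary file so repeated runs never collide or litter the working directory.

    import os
    import tempfile

    tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
    os.close(tmpfd)            # only the path is needed; the plot reopens it
    # fns += pw.save(name=tmpname)   # as in the tests above
    try:
        os.remove(tmpname)     # mirror of teardown_func: ignore missing files
    except OSError:
        pass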
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -1,130 +1,94 @@
-from yt.testing import *
-from yt.data_objects.image_array import ImageArray
import numpy as np
import os
import tempfile
import shutil
+import unittest
+from yt.data_objects.image_array import ImageArray
+from yt.testing import \
+ assert_equal
+
def setup():
from yt.config import ytcfg
- ytcfg["yt","__withintesting"] = "True"
- np.seterr(all = 'ignore')
+ ytcfg["yt", "__withintesting"] = "True"
+ np.seterr(all='ignore')
+
+
+def dummy_image(kstep, nlayers):
+ im = np.zeros([64, 128, nlayers])
+ for i in xrange(im.shape[0]):
+ for k in xrange(im.shape[2]):
+ im[i, :, k] = np.linspace(0.0, kstep * k, im.shape[1])
+ return im
+
def test_rgba_rescale():
- im = np.zeros([64,128,4])
- for i in xrange(im.shape[0]):
- for k in xrange(im.shape[2]):
- im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
- im_arr = ImageArray(im)
+ im_arr = ImageArray(dummy_image(10.0, 4))
new_im = im_arr.rescale(inline=False)
- yield assert_equal, im_arr[:,:,:3].max(), 2*10.
- yield assert_equal, im_arr[:,:,3].max(), 3*10.
- yield assert_equal, new_im[:,:,:3].sum(axis=2).max(), 1.0
- yield assert_equal, new_im[:,:,3].max(), 1.0
+ yield assert_equal, im_arr[:, :, :3].max(), 2 * 10.
+ yield assert_equal, im_arr[:, :, 3].max(), 3 * 10.
+ yield assert_equal, new_im[:, :, :3].sum(axis=2).max(), 1.0
+ yield assert_equal, new_im[:, :, 3].max(), 1.0
im_arr.rescale()
- yield assert_equal, im_arr[:,:,:3].sum(axis=2).max(), 1.0
- yield assert_equal, im_arr[:,:,3].max(), 1.0
+ yield assert_equal, im_arr[:, :, :3].sum(axis=2).max(), 1.0
+ yield assert_equal, im_arr[:, :, 3].max(), 1.0
-def test_image_array_hdf5():
- # Perform I/O in safe place instead of yt main dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
- im = np.zeros([64,128,3])
- for i in xrange(im.shape[0]):
- for k in xrange(im.shape[2]):
- im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+class TestImageArray(unittest.TestCase):
- myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
- 'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
- 'width':0.245, 'units':'cm', 'type':'rendering'}
+ tmpdir = None
+ curdir = None
- im_arr = ImageArray(im, info=myinfo)
- im_arr.save('test_3d_ImageArray')
+ def setUp(self):
+ self.tmpdir = tempfile.mkdtemp()
+ self.curdir = os.getcwd()
+ os.chdir(self.tmpdir)
- im = np.zeros([64,128])
- for i in xrange(im.shape[0]):
- im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+ def test_image_array_hdf5(self):
+ myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+ 'north_vector': np.array([0., 0., 1.]),
+ 'normal_vector': np.array([0., 1., 0.]),
+ 'width': 0.245, 'units': 'cm', 'type': 'rendering'}
- myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
- 'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
- 'width':0.245, 'units':'cm', 'type':'rendering'}
+ im_arr = ImageArray(dummy_image(0.3, 3), info=myinfo)
+ im_arr.save('test_3d_ImageArray')
- im_arr = ImageArray(im, info=myinfo)
- im_arr.save('test_2d_ImageArray')
+ im = np.zeros([64, 128])
+ for i in xrange(im.shape[0]):
+ im[i, :] = np.linspace(0., 0.3 * 2, im.shape[1])
- os.chdir(curdir)
- # clean up
- shutil.rmtree(tmpdir)
+ myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+ 'north_vector': np.array([0., 0., 1.]),
+ 'normal_vector': np.array([0., 1., 0.]),
+ 'width': 0.245, 'units': 'cm', 'type': 'rendering'}
-def test_image_array_rgb_png():
- # Perform I/O in safe place instead of yt main dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
+ im_arr = ImageArray(im, info=myinfo)
+ im_arr.save('test_2d_ImageArray')
- im = np.zeros([64,128,3])
- for i in xrange(im.shape[0]):
- for k in xrange(im.shape[2]):
- im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+ def test_image_array_rgb_png(self):
+ im_arr = ImageArray(dummy_image(10.0, 3))
+ im_arr.write_png('standard.png')
- im_arr = ImageArray(im)
- im_arr.write_png('standard.png')
+ def test_image_array_rgba_png(self):
+ im_arr = ImageArray(dummy_image(10.0, 4))
+ im_arr.write_png('standard.png')
+ im_arr.write_png('non-scaled.png', rescale=False)
+ im_arr.write_png('black_bg.png', background='black')
+ im_arr.write_png('white_bg.png', background='white')
+ im_arr.write_png('green_bg.png', background=[0., 1., 0., 1.])
+ im_arr.write_png('transparent_bg.png', background=None)
-def test_image_array_rgba_png():
- # Perform I/O in safe place instead of yt main dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
+ def test_image_array_background(self):
+ im_arr = ImageArray(dummy_image(10.0, 4))
+ im_arr.rescale()
+ new_im = im_arr.add_background_color([1., 0., 0., 1.], inline=False)
+ new_im.write_png('red_bg.png')
+ im_arr.add_background_color('black')
+ im_arr.write_png('black_bg2.png')
- im = np.zeros([64,128,4])
- for i in xrange(im.shape[0]):
- for k in xrange(im.shape[2]):
- im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
- im_arr = ImageArray(im)
- im_arr.write_png('standard.png')
- im_arr.write_png('non-scaled.png', rescale=False)
- im_arr.write_png('black_bg.png', background='black')
- im_arr.write_png('white_bg.png', background='white')
- im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
- im_arr.write_png('transparent_bg.png', background=None)
-
-
-def test_image_array_background():
- # Perform I/O in safe place instead of yt main dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- im = np.zeros([64,128,4])
- for i in xrange(im.shape[0]):
- for k in xrange(im.shape[2]):
- im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
- im_arr = ImageArray(im)
- im_arr.rescale()
- new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
- new_im.write_png('red_bg.png')
- im_arr.add_background_color('black')
- im_arr.write_png('black_bg2.png')
-
- os.chdir(curdir)
- # clean up
- shutil.rmtree(tmpdir)
-
-
-
-
-
-
-
-
-
-
-
-
-
+ def tearDown(self):
+ os.chdir(self.curdir)
+ # clean up
+ shutil.rmtree(self.tmpdir)
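The setUp/tearDown arrangement the rewritten tests rely on, reduced to a reusable skeleton (a sketch, not part of yt): every test method runs inside a throwaway directory that is removed afterwards.

    import os
    import shutil
    import tempfile
    import unittest

    class TmpDirTestCase(unittest.TestCase):
        def setUp(self):
            self.curdir = os.getcwd()
            self.tmpdir = tempfile.mkdtemp()
            os.chdir(self.tmpdir)       # all file output lands in tmpdir

        def tearDown(self):
            os.chdir(self.curdir)       # restore cwd before deleting it
            shutil.rmtree(self.tmpdir)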
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,5 +1,6 @@
from yt.testing import *
import os
+import tempfile
def setup():
from yt.config import ytcfg
@@ -7,7 +8,10 @@
def teardown_func(fns):
for fn in fns:
- os.remove(fn)
+ try:
+ os.remove(fn)
+ except OSError:
+ pass
def test_projection():
for nprocs in [8, 1]:
@@ -37,7 +41,9 @@
yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
pw = proj.to_pw()
- fns += pw.save()
+ tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+ os.close(tmpfd)
+ fns += pw.save(name=tmpname)
frb = proj.to_frb((1.0,'unitary'), 64)
for proj_field in ['Ones', 'Density']:
yield assert_equal, frb[proj_field].info['data_source'], \
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -27,6 +27,7 @@
"""
import os
import numpy as np
+import tempfile
from nose.tools import raises
from yt.testing import \
fake_random_pf, assert_equal, assert_array_equal
@@ -42,7 +43,10 @@
def teardown_func(fns):
for fn in fns:
- os.remove(fn)
+ try:
+ os.remove(fn)
+ except OSError:
+ pass
def test_slice():
@@ -72,7 +76,9 @@
yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
pw = slc.to_pw()
- fns += pw.save()
+ tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+ os.close(tmpfd)
+ fns += pw.save(name=tmpname)
frb = slc.to_frb((1.0, 'unitary'), 64)
for slc_field in ['Ones', 'Density']:
yield assert_equal, frb[slc_field].info['data_source'], \
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/extern/__init__.py
--- /dev/null
+++ b/yt/extern/__init__.py
@@ -0,0 +1,4 @@
+"""
+This packages contains python packages that are bundled with yt
+and are developed by 3rd party upstream.
+"""
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/extern/parameterized.py
--- /dev/null
+++ b/yt/extern/parameterized.py
@@ -0,0 +1,226 @@
+import re
+import inspect
+from functools import wraps
+from collections import namedtuple
+
+from nose.tools import nottest
+from unittest import TestCase
+
+from . import six
+
+if six.PY3:
+ def new_instancemethod(f, *args):
+ return f
+else:
+ import new
+ new_instancemethod = new.instancemethod
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+ """ Represents a single parameter to a test case.
+
+ For example::
+
+ >>> p = param("foo", bar=16)
+ >>> p
+ param("foo", bar=16)
+ >>> p.args
+ ('foo', )
+ >>> p.kwargs
+ {'bar': 16}
+
+ Intended to be used as an argument to ``@parameterized``::
+
+ @parameterized([
+ param("foo", bar=16),
+ ])
+ def test_stuff(foo, bar=16):
+ pass
+ """
+
+ def __new__(cls, *args , **kwargs):
+ return _param.__new__(cls, args, kwargs)
+
+ @classmethod
+ def explicit(cls, args=None, kwargs=None):
+ """ Creates a ``param`` by explicitly specifying ``args`` and
+ ``kwargs``::
+
+ >>> param.explicit([1,2,3])
+ param(*(1, 2, 3))
+ >>> param.explicit(kwargs={"foo": 42})
+ param(*(), **{"foo": "42"})
+ """
+ args = args or ()
+ kwargs = kwargs or {}
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_decorator(cls, args):
+ """ Returns an instance of ``param()`` for ``@parameterized`` argument
+ ``args``::
+
+ >>> param.from_decorator((42, ))
+ param(args=(42, ), kwargs={})
+ >>> param.from_decorator("foo")
+ param(args=("foo", ), kwargs={})
+ """
+ if isinstance(args, param):
+ return args
+ if isinstance(args, six.string_types):
+ args = (args, )
+ return cls(*args)
+
+ def __repr__(self):
+ return "param(*%r, **%r)" %self
+
+class parameterized(object):
+ """ Parameterize a test case::
+
+ class TestInt(object):
+ @parameterized([
+ ("A", 10),
+ ("F", 15),
+ param("10", 42, base=42)
+ ])
+ def test_int(self, input, expected, base=16):
+ actual = int(input, base=base)
+ assert_equal(actual, expected)
+
+ @parameterized([
+ (2, 3, 5)
+ (3, 5, 8),
+ ])
+ def test_add(a, b, expected):
+ assert_equal(a + b, expected)
+ """
+
+ def __init__(self, input):
+ self.get_input = self.input_as_callable(input)
+
+ def __call__(self, test_func):
+ self.assert_not_in_testcase_subclass()
+
+ @wraps(test_func)
+ def parameterized_helper_method(test_self=None):
+ f = test_func
+ if test_self is not None:
+ # If we are a test method (which we suppose to be true if we
+ # are being passed a "self" argument), we first need to create
+ # an instance method, attach it to the instance of the test
+ # class, then pull it back off to turn it into a bound method.
+ # If we don't do this, Nose gets cranky.
+ f = self.make_bound_method(test_self, test_func)
+ # Note: because nose is so very picky, the more obvious
+ # ``return self.yield_nose_tuples(f)`` won't work here.
+ for nose_tuple in self.yield_nose_tuples(f):
+ yield nose_tuple
+
+ test_func.__name__ = "_helper_for_%s" %(test_func.__name__, )
+ parameterized_helper_method.parameterized_input = input
+ parameterized_helper_method.parameterized_func = test_func
+ return parameterized_helper_method
+
+ def yield_nose_tuples(self, func):
+ for args in self.get_input():
+ p = param.from_decorator(args)
+ # ... then yield that as a tuple. If those steps aren't
+ # followed precicely, Nose gets upset and doesn't run the test
+ # or doesn't run setup methods.
+ yield self.param_as_nose_tuple(p, func)
+
+ def param_as_nose_tuple(self, p, func):
+ nose_func = func
+ nose_args = p.args
+ if p.kwargs:
+ nose_func = wraps(func)(lambda args, kwargs: func(*args, **kwargs))
+ nose_args = (p.args, p.kwargs)
+ return (nose_func, ) + nose_args
+
+ def make_bound_method(self, instance, func):
+ cls = type(instance)
+ im_f = new_instancemethod(func, None, cls)
+ setattr(cls, func.__name__, im_f)
+ return getattr(instance, func.__name__)
+
+ def assert_not_in_testcase_subclass(self):
+ parent_classes = self._terrible_magic_get_defining_classes()
+ if any(issubclass(cls, TestCase) for cls in parent_classes):
+ raise Exception("Warning: '@parameterized' tests won't work "
+ "inside subclasses of 'TestCase' - use "
+ "'@parameterized.expand' instead")
+
+ def _terrible_magic_get_defining_classes(self):
+ """ Returns the set of parent classes of the class currently being defined.
+ Will likely only work if called from the ``parameterized`` decorator.
+ This function is entirely @brandon_rhodes's fault, as he suggested
+ the implementation: http://stackoverflow.com/a/8793684/71522
+ """
+ stack = inspect.stack()
+ if len(stack) <= 4:
+ return []
+ frame = stack[4]
+ code_context = frame[4] and frame[4][0].strip()
+ if not (code_context and code_context.startswith("class ")):
+ return []
+ _, parents = code_context.split("(", 1)
+ parents, _ = parents.rsplit(")", 1)
+ return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+ @classmethod
+ def input_as_callable(cls, input):
+ if callable(input):
+ return lambda: cls.check_input_values(input())
+ input_values = cls.check_input_values(input)
+ return lambda: input_values
+
+ @classmethod
+ def check_input_values(cls, input_values):
+ if not hasattr(input_values, "__iter__"):
+ raise ValueError("expected iterable input; got %r" %(input, ))
+ return input_values
+
+ @classmethod
+ def expand(cls, input):
+ """ A "brute force" method of parameterizing test cases. Creates new
+ test cases and injects them into the namespace that the wrapped
+ function is being defined in. Useful for parameterizing tests in
+ subclasses of 'UnitTest', where Nose test generators don't work.
+
+ >>> @parameterized.expand([("foo", 1, 2)])
+ ... def test_add1(name, input, expected):
+ ... actual = add1(input)
+ ... assert_equal(actual, expected)
+ ...
+ >>> locals()
+ ... 'test_add1_foo_0': <function ...> ...
+ >>>
+ """
+
+ def parameterized_expand_wrapper(f):
+ stack = inspect.stack()
+ frame = stack[1]
+ frame_locals = frame[0].f_locals
+
+ base_name = f.__name__
+ get_input = cls.input_as_callable(input)
+ for num, args in enumerate(get_input()):
+ p = param.from_decorator(args)
+ name_suffix = "_%s" %(num, )
+ if len(p.args) > 0 and isinstance(p.args[0], six.string_types):
+ name_suffix += "_" + cls.to_safe_name(p.args[0])
+ name = base_name + name_suffix
+ frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+ return nottest(f)
+ return parameterized_expand_wrapper
+
+ @classmethod
+ def param_as_standalone_func(cls, p, func, name):
+ standalone_func = lambda *a: func(*(a + p.args), **p.kwargs)
+ standalone_func.__name__ = name
+ return standalone_func
+
+ @classmethod
+ def to_safe_name(cls, s):
+ return str(re.sub("[^a-zA-Z0-9_]", "", s))
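A minimal, hypothetical use of the bundled decorator, following its own docstring: inside a unittest.TestCase subclass, @parameterized.expand generates one test method per parameter tuple (test_add_0, test_add_1, ...).

    import unittest
    from yt.extern.parameterized import parameterized

    class TestAdd(unittest.TestCase):
        @parameterized.expand([(1, 2, 3), (2, 3, 5)])
        def test_add(self, a, b, expected):
            self.assertEqual(a + b, expected)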
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/extern/six.py
--- /dev/null
+++ b/yt/extern/six.py
@@ -0,0 +1,404 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2013 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin at python.org>"
+__version__ = "1.3.0"
+
+
+# True if we are running on Python 3.
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result)
+ # This is a bit ugly, but it avoids running this again.
+ delattr(tp, self.name)
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+
+class _MovedItems(types.ModuleType):
+ """Lazy loading of moved objects"""
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("winreg", "_winreg"),
+]
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+del attr
+
+moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+
+ _iterkeys = "keys"
+ _itervalues = "values"
+ _iteritems = "items"
+ _iterlists = "lists"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+ _iterkeys = "iterkeys"
+ _itervalues = "itervalues"
+ _iteritems = "iteritems"
+ _iterlists = "iterlists"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+def iterkeys(d, **kw):
+ """Return an iterator over the keys of a dictionary."""
+ return iter(getattr(d, _iterkeys)(**kw))
+
+def itervalues(d, **kw):
+ """Return an iterator over the values of a dictionary."""
+ return iter(getattr(d, _itervalues)(**kw))
+
+def iteritems(d, **kw):
+ """Return an iterator over the (key, value) pairs of a dictionary."""
+ return iter(getattr(d, _iteritems)(**kw))
+
+def iterlists(d, **kw):
+ """Return an iterator over the (key, [values]) pairs of a dictionary."""
+ return iter(getattr(d, _iterlists)(**kw))
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+ def u(s):
+ return s
+ if sys.version_info[1] <= 1:
+ def int2byte(i):
+ return bytes((i,))
+ else:
+ # This is about 2x faster than the implementation above on 3.2+
+ int2byte = operator.methodcaller("to_bytes", 1, "big")
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+else:
+ def b(s):
+ return s
+ def u(s):
+ return unicode(s, "unicode_escape")
+ int2byte = chr
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+if PY3:
+ import builtins
+ exec_ = getattr(builtins, "exec")
+
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+
+ print_ = getattr(builtins, "print")
+ del builtins
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+ def print_(*args, **kwargs):
+ """The new-style print function."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+
+_add_doc(reraise, """Reraise an exception.""")
+
+
+def with_metaclass(meta, base=object):
+ """Create a base class with a metaclass."""
+ return meta("NewBase", (base,), {})
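A short sketch of why six is being bundled under yt.extern: the same dictionary-walking code runs unchanged on Python 2 and 3 by going through the compatibility names defined above.

    from yt.extern.six import iteritems, string_types, print_

    def describe(d):
        # iteritems() maps to dict.iteritems on Python 2 and dict.items on 3
        for key, value in iteritems(d):
            if isinstance(key, string_types):   # basestring vs. str
                print_(key, "->", value)

    describe({"left_edge": 0.0, "right_edge": 1.0})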
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -68,9 +68,9 @@
data = data[2::3].reshape(grid_dims,order='F').copy()
f.close()
if grid.pf.field_ordering == 1:
- return data.T
+ return data.T.astype("float64")
else:
- return data
+ return data.astype("float64")
def _read_data_slice(self, grid, field, axis, coord):
sl = [slice(None), slice(None), slice(None)]
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -224,7 +224,10 @@
else:
self.units[field_name] = 1.0
if 'field_units' in current_field.attrs:
- current_fields_unit = just_one(current_field.attrs['field_units'])
+ if type(current_field.attrs['field_units']) == str:
+ current_fields_unit = current_field.attrs['field_units']
+ else:
+ current_fields_unit = just_one(current_field.attrs['field_units'])
else:
current_fields_unit = ""
self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -84,8 +84,11 @@
units=r"\rm{cm}/\rm{s}")
for f,v in log_translation_dict.items():
- add_field(f, TranslationFunc(v), take_log=True)
+ add_field(f, TranslationFunc(v), take_log=True,
+ units=KnownGDFFields[v].get_units(),
+ projected_units=KnownGDFFields[v].get_projected_units())
for f,v in translation_dict.items():
- add_field(f, TranslationFunc(v), take_log=False)
-
+ add_field(f, TranslationFunc(v), take_log=False,
+ units=KnownGDFFields[v].get_units(),
+ projected_units=KnownGDFFields[v].get_projected_units())
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -215,6 +215,7 @@
self.amr_header['nboundary']*l]
return ng
min_level = self.pf.min_level
+ max_level = min_level
nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
for level in range(self.amr_header['nlevelmax']):
# Easier if do this 1-indexed
@@ -248,6 +249,8 @@
assert(pos.shape[0] == ng)
n = self.oct_handler.add(cpu + 1, level - min_level, pos)
assert(n == ng)
+ if n > 0: max_level = max(level - min_level, max_level)
+ self.max_level = max_level
self.oct_handler.finalize()
def included(self, selector):
@@ -297,7 +300,7 @@
# for now, the hierarchy file is the parameter file!
self.hierarchy_filename = self.parameter_file.parameter_filename
self.directory = os.path.dirname(self.hierarchy_filename)
- self.max_level = pf.max_level
+ self.max_level = None
self.float_type = np.float64
super(RAMSESGeometryHandler, self).__init__(pf, data_style)
@@ -308,6 +311,7 @@
for i in range(self.parameter_file['ncpu'])]
total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
for dom in self.domains)
+ self.max_level = max(dom.max_level for dom in self.domains)
self.num_grids = total_octs
def _detect_fields(self):
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -122,15 +122,17 @@
self.max_level = getattr(dobj, "max_level", 99)
self.overlap_cells = 0
- if dobj is None:
- for i in range(3):
- self.periodicity[i] = False
- self.domain_width[i] = 0.0
- else:
- for i in range(3) :
- self.domain_width[i] = dobj.pf.domain_right_edge[i] - \
- dobj.pf.domain_left_edge[i]
- self.periodicity[i] = dobj.pf.periodicity[i]
+ for i in range(3) :
+ pf = getattr(dobj, 'pf', None)
+ if pf is None:
+ for i in range(3):
+ self.domain_width[i] = 1.0
+ self.periodicity[i] = False
+ else:
+ for i in range(3):
+ self.domain_width[i] = pf.domain_right_edge[i] - \
+ pf.domain_left_edge[i]
+ self.periodicity[i] = pf.periodicity[i]
@cython.boundscheck(False)
@cython.wraparound(False)
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -144,7 +144,8 @@
get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
callback_registry, write_bitmap, write_image, annotate_image, \
apply_colormap, scale_image, write_projection, write_fits, \
- SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
+ SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot, \
+ show_colormaps
from yt.visualization.volume_rendering.api import \
ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/setup.py
--- a/yt/setup.py
+++ b/yt/setup.py
@@ -9,6 +9,7 @@
config = Configuration('yt', parent_package, top_path)
config.add_subpackage('analysis_modules')
config.add_subpackage('data_objects')
+ config.add_subpackage('extern')
config.add_subpackage('frontends')
config.add_subpackage('geometry')
config.add_subpackage('gui')
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -33,6 +33,7 @@
import cPickle
import shelve
import zlib
+import tempfile
from matplotlib.testing.compare import compare_images
from nose.plugins import Plugin
@@ -604,9 +605,11 @@
self.plot_axis, self.plot_kwargs)
attr = getattr(plot, self.attr_name)
attr(*self.attr_args[0], **self.attr_args[1])
- fn = plot.save()[0]
- image = mpimg.imread(fn)
- os.remove(fn)
+ tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+ os.close(tmpfd)
+ plot.save(name=tmpname)
+ image = mpimg.imread(tmpname)
+ os.remove(tmpname)
return [zlib.compress(image.dumps())]
def compare(self, new_result, old_result):
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/utilities/grid_data_format/tests/test_writer.py
--- a/yt/utilities/grid_data_format/tests/test_writer.py
+++ b/yt/utilities/grid_data_format/tests/test_writer.py
@@ -50,17 +50,18 @@
tmpdir = tempfile.mkdtemp()
tmpfile = os.path.join(tmpdir, 'test_gdf.h5')
- test_pf = fake_random_pf(64)
- write_to_gdf(test_pf, tmpfile, data_author=TEST_AUTHOR,
- data_comment=TEST_COMMENT)
- del test_pf
+ try:
+ test_pf = fake_random_pf(64)
+ write_to_gdf(test_pf, tmpfile, data_author=TEST_AUTHOR,
+ data_comment=TEST_COMMENT)
+ del test_pf
+ assert isinstance(load(tmpfile), GDFStaticOutput)
- assert isinstance(load(tmpfile), GDFStaticOutput)
+ h5f = h5.File(tmpfile, 'r')
+ gdf = h5f['gridded_data_format'].attrs
+ assert_equal(gdf['data_author'], TEST_AUTHOR)
+ assert_equal(gdf['data_comment'], TEST_COMMENT)
+ h5f.close()
- h5f = h5.File(tmpfile, 'r')
- gdf = h5f['gridded_data_format'].attrs
- assert_equal(gdf['data_author'], TEST_AUTHOR)
- assert_equal(gdf['data_comment'], TEST_COMMENT)
- h5f.close()
-
- shutil.rmtree(tmpdir)
+ finally:
+ shutil.rmtree(tmpdir)
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -20,36 +20,37 @@
# Create a temporary directory
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
- os.chdir(tmpdir)
+ exit_code = 1
- # Get compiler invocation
- compiler = os.getenv('CC', 'cc')
+ try:
+ os.chdir(tmpdir)
- # Attempt to compile a test script.
- # See http://openmp.org/wp/openmp-compilers/
- filename = r'test.c'
- file = open(filename,'w', 0)
- file.write(
- "#include <omp.h>\n"
- "#include <stdio.h>\n"
- "int main() {\n"
- "#pragma omp parallel\n"
- "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
- "}"
- )
- with open(os.devnull, 'w') as fnull:
- exit_code = subprocess.call([compiler, '-fopenmp', filename],
- stdout=fnull, stderr=fnull)
+ # Get compiler invocation
+ compiler = os.getenv('CC', 'cc')
- # Clean up
- file.close()
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
+ # Attempt to compile a test script.
+ # See http://openmp.org/wp/openmp-compilers/
+ filename = r'test.c'
+ file = open(filename,'w', 0)
+ file.write(
+ "#include <omp.h>\n"
+ "#include <stdio.h>\n"
+ "int main() {\n"
+ "#pragma omp parallel\n"
+ "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
+ "}"
+ )
+ with open(os.devnull, 'w') as fnull:
+ exit_code = subprocess.call([compiler, '-fopenmp', filename],
+ stdout=fnull, stderr=fnull)
- if exit_code == 0:
- return True
- else:
- return False
+ # Clean up
+ file.close()
+ finally:
+ os.chdir(curdir)
+ shutil.rmtree(tmpdir)
+
+ return exit_code == 0
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
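The restructured OpenMP check, reduced to its essential shape (a sketch under the same assumptions, not the actual setup code): run the trial compile inside try/finally so the working directory is restored and the temporary directory removed even if something raises.

    import os
    import shutil
    import subprocess
    import tempfile

    def has_openmp_sketch():
        tmpdir = tempfile.mkdtemp()
        curdir = os.getcwd()
        exit_code = 1
        try:
            os.chdir(tmpdir)
            compiler = os.getenv('CC', 'cc')
            with open('test.c', 'w') as f:
                f.write("#include <omp.h>\nint main(void) { return 0; }\n")
            with open(os.devnull, 'w') as fnull:
                exit_code = subprocess.call([compiler, '-fopenmp', 'test.c'],
                                            stdout=fnull, stderr=fnull)
        finally:
            os.chdir(curdir)
            shutil.rmtree(tmpdir)
        return exit_code == 0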
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7798,3 +7798,11 @@
color_map_luts['Rainbow18'] = color_map_luts['idl38']
color_map_luts['Rainbow + white'] = color_map_luts['idl39']
color_map_luts['Rainbow + black'] = color_map_luts['idl40']
+
+# Create a reversed LUT for each of the above defined LUTs
+# and append a "_r" (for reversal. consistent with MPL convention).
+# So for example, the reversal of "Waves" is "Waves_r"
+temp = {}
+for k,v in color_map_luts.iteritems():
+ temp[k+"_r"] = (v[0][::-1], v[1][::-1], v[2][::-1], v[3][::-1])
+color_map_luts.update(temp)
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -29,7 +29,8 @@
"""
from color_maps import \
- add_cmap
+ add_cmap, \
+ show_colormaps
from plot_collection import \
PlotCollection, \
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -145,3 +145,56 @@
b = cmap._lut[:-3, 2]
a = np.ones(b.shape)
return [r, g, b, a]
+
+def show_colormaps(subset = "all", filename=None):
+ """
+ Displays the colormaps available to yt. Note, most functions can use
+ both the matplotlib and the native yt colormaps; however, there are
+ some special functions existing within image_writer.py (e.g. write_image()
+ write_fits(), write_bitmap(), etc.), which cannot access the matplotlib
+ colormaps.
+
+ In addition to the colormaps listed, one can access the reverse of each
+ colormap by appending a "_r" to any map.
+
+ Parameters
+ ----------
+
+ subset : string, opt
+
+ valid values : "all", "yt_native"
+ default : "all"
+
+ As mentioned above, a few functions can only access yt_native
+ colormaps. To display only the yt_native colormaps, set this
+ to "yt_native".
+
+ filename : string, opt
+
+ default: None
+
+ If filename is set, then it will save the colormaps to an output
+ file. If it is not set, it will "show" the result interactively.
+ """
+ import pylab as pl
+
+ a=np.outer(np.arange(0,1,0.01), np.ones(10))
+ if (subset == "all"):
+ maps = [ m for m in pl.cm.datad if (not m.startswith("idl")) & (not m.endswith("_r"))]
+ if (subset == "yt_native"):
+ maps = [ m for m in _cm.color_map_luts if (not m.startswith("idl")) & (not m.endswith("_r"))]
+ maps = list(set(maps))
+ maps.sort()
+ # scale the image size by the number of cmaps
+ pl.figure(figsize=(2.*len(maps)/10.,6))
+ pl.subplots_adjust(top=0.7,bottom=0.05,left=0.01,right=0.99)
+ l = len(maps)+1
+ for i,m in enumerate(maps):
+ pl.subplot(1,l,i+1)
+ pl.axis("off")
+ pl.imshow(a, aspect='auto',cmap=pl.get_cmap(m),origin="lower")
+ pl.title(m,rotation=90, fontsize=10, verticalalignment='bottom')
+ if filename is not None:
+ pl.savefig(filename, dpi=100, facecolor='gray')
+ else:
+ pl.show()
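Example use of the new helper, per its docstring and the yt.mods export added earlier in this changeset: display every available colormap interactively, or write just the yt-native ones to a file.

    from yt.mods import show_colormaps

    show_colormaps()                        # interactive display of all maps
    show_colormaps(subset="yt_native",
                   filename="yt_native_cmaps.png")  # save only yt's own maps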
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -45,6 +45,7 @@
sec_per_Gyr, sec_per_Myr, \
sec_per_kyr, sec_per_year, \
sec_per_day, sec_per_hr
+from yt.visualization.image_writer import apply_colormap
import _MPL
@@ -176,7 +177,8 @@
class QuiverCallback(PlotCallback):
"""
- annotate_quiver(field_x, field_y, factor, scale=None, scale_units=None, normalize=False):
+ annotate_quiver(field_x, field_y, factor=16, scale=None, scale_units=None,
+ normalize=False, bv_x=0, bv_y=0):
Adds a 'quiver' plot to any plot, using the *field_x* and *field_y*
from the associated data, skipping every *factor* datapoints
@@ -230,8 +232,8 @@
class ContourCallback(PlotCallback):
"""
- annotate_contour(self, field, ncont=5, factor=4, take_log=None, clim=None,
- plot_args = None):
+ annotate_contour(field, ncont=5, factor=4, take_log=None, clim=None,
+ plot_args=None, label=False, label_args=None):
Add contours in *field* to the plot. *ncont* governs the number of
contours generated, *factor* governs the number of points used in the
@@ -338,18 +340,21 @@
class GridBoundaryCallback(PlotCallback):
"""
- annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
+ annotate_grids(alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True,
+ min_level=None, max_level=None, cmap='B-W LINEAR_r'):
- Adds grid boundaries to a plot, optionally with *alpha*-blending.
- Cuttoff for display is at *min_pix* wide.
- *draw_ids* puts the grid id in the corner of the grid. (Not so great in projections...)
- Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn. If *min_level*
- is specified, only draw grids at or above min_level. If *max_level* is specified, only
- draw grids at or below max_level.
+ Draws grids on an existing PlotWindow object.
+ Adds grid boundaries to a plot, optionally with alpha-blending. By default,
+ colors different levels of grids with different colors going from white to
+ black, but you can change to any arbitrary colormap with cmap keyword
+ (or all black cells for all levels with cmap=None). Cuttoff for display is at
+ min_pix wide. draw_ids puts the grid id in the corner of the grid.
+ (Not so great in projections...). One can set min and maximum level of
+ grids to display.
"""
_type_name = "grids"
- def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True,
- min_level=None, max_level=None):
+ def __init__(self, alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True,
+ min_level=None, max_level=None, cmap='B-W LINEAR_r'):
PlotCallback.__init__(self)
self.alpha = alpha
self.min_pix = min_pix
@@ -358,6 +363,7 @@
self.periodic = periodic
self.min_level = min_level
self.max_level = max_level
+ self.cmap = cmap
def __call__(self, plot):
x0, x1 = plot.xlim
@@ -375,15 +381,16 @@
pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
else:
pxs, pys = np.mgrid[0:0:1j,0:0:1j]
- GLE = plot.data.pf.h.grid_left_edge
- GRE = plot.data.pf.h.grid_right_edge
- grid_levels = plot.data.pf.h.grid_levels[:,0]
+ GLE = plot.data.grid_left_edge
+ GRE = plot.data.grid_right_edge
+ levels = plot.data.grid_levels[:,0]
min_level = self.min_level
- max_level = self.min_level
+ max_level = self.max_level
+ if max_level is None:
+ max_level = plot.data.pf.h.max_level
if min_level is None:
min_level = 0
- if max_level is None:
- max_level = plot.data.pf.h.max_level
+
for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
pxo = px_off * dom[px_index]
pyo = py_off * dom[py_index]
@@ -393,19 +400,28 @@
right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
visible = ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix ) & \
- ( grid_levels >= min_level) & \
- ( grid_levels <= max_level)
+ ( levels >= min_level) & \
+ ( levels <= max_level)
+
+ if self.cmap is not None:
+ edgecolors = apply_colormap(levels[(levels <= max_level) & (levels >= min_level)]*1.0,
+ color_bounds=[0,plot.data.pf.h.max_level],
+ cmap_name=self.cmap)[0,:,:]*1.0/255.
+ edgecolors[:,3] = self.alpha
+ else:
+ edgecolors = (0.0,0.0,0.0,self.alpha)
+
if visible.nonzero()[0].size == 0: continue
verts = np.array(
[(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
(left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
verts=verts.transpose()[visible,:,:]
- edgecolors = (0.0,0.0,0.0,self.alpha)
grid_collection = matplotlib.collections.PolyCollection(
verts, facecolors="none",
edgecolors=edgecolors)
plot._axes.hold(True)
plot._axes.add_collection(grid_collection)
+
if self.draw_ids:
visible_ids = ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -148,15 +148,19 @@
else:
norm = mpl.matplotlib.colors.Normalize()
if use_mesh:
- pcm = axes.pcolormesh(x_bins, y_bins, self.image, norm=norm,
+ mappable = axes.pcolormesh(
+ x_bins, y_bins, self.image, norm=norm,
shading='flat', cmap = self.cbar.cmap,
rasterized=True)
if self.x_spec.scale == 'log': axes.set_xscale("log")
if self.y_spec.scale == 'log': axes.set_yscale("log")
else:
- axes.imshow(self.image, origin='lower', interpolation='nearest',
+ mappable = axes.imshow(
+ self.image, origin='lower', interpolation='nearest',
cmap = self.cbar.cmap, extent = [xmi,xma,ymi,yma],
norm = norm)
+ cbar = figure.colorbar(mappable)
+ cbar.set_label(self.cbar.title)
if self.x_spec.title is not None:
axes.set_xlabel(self.x_spec.title)
if self.y_spec.title is not None:
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -22,9 +22,12 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+import itertools
import os
import tempfile
import shutil
+import unittest
+from yt.extern.parameterized import parameterized, param
from yt.testing import \
fake_random_pf, assert_equal, assert_rel_equal
from yt.utilities.answer_testing.framework import \
@@ -65,132 +68,163 @@
return image_type == os.path.splitext(fname)[1]
-attr_args ={ "pan" : [( ((0.1, 0.1),), {} )],
- "pan_rel" : [( ((0.1, 0.1),), {} )],
- "set_axes_unit" : [( ("kpc",), {} ),
- ( ("Mpc",), {} ),
- ( (("kpc", "kpc"),), {} ),
- ( (("kpc", "Mpc"),), {} )],
- "set_buff_size" : [( (1600,), {} ),
- ( ((600, 800),), {} )],
- "set_center" : [( ((0.4, 0.3),), {} )],
- "set_cmap" : [( ('Density', 'RdBu'), {} ),
- ( ('Density', 'kamae'), {} )],
- "set_font" : [( ({'family':'sans-serif', 'style':'italic',
- 'weight':'bold', 'size':24},), {} )],
- "set_log" : [( ('Density', False), {} )],
- "set_window_size" : [( (7.0,), {} )],
- "set_zlim" : [( ('Density', 1e-25, 1e-23), {} ),
- ( ('Density', 1e-25, None), {'dynamic_range' : 4} )],
- "zoom" : [( (10,), {} )] }
-m7 = "DD0010/moving7_0010"
-wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
-@requires_pf(m7)
-@requires_pf(wt)
+TEST_FLNMS = [None, 'test.png', 'test.eps',
+ 'test.ps', 'test.pdf']
+M7 = "DD0010/moving7_0010"
+WT = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+
+ATTR_ARGS = {"pan": [(((0.1, 0.1), ), {})],
+ "pan_rel": [(((0.1, 0.1), ), {})],
+ "set_axes_unit": [(("kpc", ), {}),
+ (("Mpc", ), {}),
+ ((("kpc", "kpc"),), {}),
+ ((("kpc", "Mpc"),), {})],
+ "set_buff_size": [((1600, ), {}),
+ (((600, 800), ), {})],
+ "set_center": [(((0.4, 0.3), ), {})],
+ "set_cmap": [(('Density', 'RdBu'), {}),
+ (('Density', 'kamae'), {})],
+ "set_font": [(({'family': 'sans-serif', 'style': 'italic',
+ 'weight': 'bold', 'size': 24}, ), {})],
+ "set_log": [(('Density', False), {})],
+ "set_window_size": [((7.0, ), {})],
+ "set_zlim": [(('Density', 1e-25, 1e-23), {}),
+ (('Density', 1e-25, None), {'dynamic_range': 4})],
+ "zoom": [((10, ), {})]}
+
+
+@requires_pf(M7)
def test_attributes():
"""Test plot member functions that aren't callbacks"""
plot_field = 'Density'
decimals = 3
- pf = data_dir_load(m7)
+ pf = data_dir_load(M7)
for ax in 'xyz':
- for attr_name in attr_args.keys():
- for args in attr_args[attr_name]:
+ for attr_name in ATTR_ARGS.keys():
+ for args in ATTR_ARGS[attr_name]:
yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
args, decimals)
- pf = data_dir_load(wt)
+
+
+@requires_pf(WT)
+def test_attributes_wt():
+ plot_field = 'Density'
+ decimals = 3
+
+ pf = data_dir_load(WT)
ax = 'z'
- for attr_name in attr_args.keys():
- for args in attr_args[attr_name]:
+ for attr_name in ATTR_ARGS.keys():
+ for args in ATTR_ARGS[attr_name]:
yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
args, decimals)
-def test_setwidth():
- pf = fake_random_pf(64)
- slc = SlicePlot(pf, 0, 'Density')
+class TestSetWidth(unittest.TestCase):
- yield assert_equal, [slc.xlim, slc.ylim, slc.width], \
- [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)]
+ pf = None
- slc.set_width((0.5,0.8))
+ def setUp(self):
+ if self.pf is None:
+ self.pf = fake_random_pf(64)
+ self.slc = SlicePlot(self.pf, 0, 'Density')
- yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
- [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)], 15
+ def _assert_15kpc(self):
+ assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+ [(-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+ (-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+ (15.0 / self.pf['kpc'], 15. / self.pf['kpc'])], 15)
- slc.set_width(15,'kpc')
+ def _assert_15_10kpc(self):
+ assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+ [(-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+ (-5.0 / self.pf['kpc'], 5.0 / self.pf['kpc']),
+ (15.0 / self.pf['kpc'], 10. / self.pf['kpc'])], 15)
- yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
- [(-7.5/pf['kpc'], 7.5/pf['kpc']),
- (-7.5/pf['kpc'], 7.5/pf['kpc']),
- (15/pf['kpc'], 15/pf['kpc'])], 15
+ def test_set_width_one(self):
+ assert_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+ [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)])
- slc.set_width((15,'kpc'))
+ def test_set_width_nonequal(self):
+ self.slc.set_width((0.5, 0.8))
+ assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+ [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)], 15)
- yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
- [(-7.5/pf['kpc'], 7.5/pf['kpc']),
- (-7.5/pf['kpc'], 7.5/pf['kpc']),
- (15/pf['kpc'], 15/pf['kpc'])], 15
+ def test_twoargs_eq(self):
+ self.slc.set_width(15, 'kpc')
+ self._assert_15kpc()
- slc.set_width(((15,'kpc'),(10,'kpc')))
+ def test_tuple_eq(self):
+ self.slc.set_width((15, 'kpc'))
+ self._assert_15kpc()
- yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
- [(-7.5/pf['kpc'], 7.5/pf['kpc']),
- (-5/pf['kpc'], 5/pf['kpc']),
- (15/pf['kpc'], 10/pf['kpc'])], 15
+ def test_tuple_of_tuples_neq(self):
+ self.slc.set_width(((15, 'kpc'), (10, 'kpc')))
+ self._assert_15_10kpc()
- slc.set_width(((15,'kpc'),(10000,'pc')))
+ def test_tuple_of_tuples_neq2(self):
+ self.slc.set_width(((15, 'kpc'), (10000, 'pc')))
+ self._assert_15_10kpc()
- yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
- [(-7.5/pf['kpc'], 7.5/pf['kpc']),
- (-5/pf['kpc'], 5/pf['kpc']),
- (15/pf['kpc'], 10/pf['kpc'])], 15
+ def test_pair_of_tuples_neq(self):
+ self.slc.set_width((15, 'kpc'), (10000, 'pc'))
+ self._assert_15_10kpc()
- slc.set_width((15,'kpc'),(10000,'pc'))
- yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
- [(-7.5/pf['kpc'], 7.5/pf['kpc']),
- (-5/pf['kpc'], 5/pf['kpc']),
- (15/pf['kpc'], 10/pf['kpc'])], 15
+class TestPlotWindowSave(unittest.TestCase):
-def test_save():
- """Test plot window creation and saving to disk."""
- # Perform I/O in safe place instead of yt main dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
+ @classmethod
+ def setUpClass(cls):
+ test_pf = fake_random_pf(64)
+ normal = [1, 1, 1]
+ ds_region = test_pf.h.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
+ projections = []
+ projections_ds = []
+ for dim in range(3):
+ projections.append(ProjectionPlot(test_pf, dim, 'Density'))
+ projections_ds.append(ProjectionPlot(test_pf, dim, 'Density',
+ data_source=ds_region))
- normal = [1, 1, 1]
+ cls.slices = [SlicePlot(test_pf, dim, 'Density') for dim in range(3)]
+ cls.projections = projections
+ cls.projections_ds = projections_ds
+ cls.offaxis_slice = OffAxisSlicePlot(test_pf, normal, 'Density')
+ cls.offaxis_proj = OffAxisProjectionPlot(test_pf, normal, 'Density')
- test_pf = fake_random_pf(64)
- test_flnms = [None, 'test.png', 'test.eps',
- 'test.ps', 'test.pdf']
+ def setUp(self):
+ self.tmpdir = tempfile.mkdtemp()
+ self.curdir = os.getcwd()
+ os.chdir(self.tmpdir)
- ds_region = test_pf.h.region([0.5]*3,[0.4]*3,[0.6]*3)
+ def tearDown(self):
+ os.chdir(self.curdir)
+ shutil.rmtree(self.tmpdir)
- for dim in [0, 1, 2]:
- obj = SlicePlot(test_pf, dim, 'Density')
- for fname in test_flnms:
- yield assert_equal, assert_fname(obj.save(fname)[0]), True
+ @parameterized.expand(
+ param.explicit(item)
+ for item in itertools.product(range(3), TEST_FLNMS))
+ def test_slice_plot(self, dim, fname):
+ assert assert_fname(self.slices[dim].save(fname)[0])
- for dim in [0, 1, 2]:
- obj = ProjectionPlot(test_pf, dim, 'Density')
- for fname in test_flnms:
- yield assert_equal, assert_fname(obj.save(fname)[0]), True
- # Test ProjectionPlot's data_source keyword
- obj = ProjectionPlot(test_pf, dim, 'Density',
- data_source=ds_region)
- obj.save()
+ @parameterized.expand(
+ param.explicit(item)
+ for item in itertools.product(range(3), TEST_FLNMS))
+ def test_projection_plot(self, dim, fname):
+ assert assert_fname(self.projections[dim].save(fname)[0])
- obj = OffAxisSlicePlot(test_pf, normal, 'Density')
- for fname in test_flnms:
- yield assert_equal, assert_fname(obj.save(fname)[0]), True
+ @parameterized.expand([(0, ), (1, ), (2, )])
+ def test_projection_plot_ds(self, dim):
+ self.projections_ds[dim].save()
- obj = OffAxisProjectionPlot(test_pf, normal, 'Density')
- for fname in test_flnms:
- yield assert_equal, assert_fname(obj.save(fname)[0]), True
+ @parameterized.expand(
+ param.explicit((fname, ))
+ for fname in TEST_FLNMS)
+ def test_offaxis_slice_plot(self, fname):
+ assert assert_fname(self.offaxis_slice.save(fname)[0])
- os.chdir(curdir)
- # clean up
- shutil.rmtree(tmpdir)
+ @parameterized.expand(
+ param.explicit((fname, ))
+ for fname in TEST_FLNMS)
+ def test_offaxis_projection_plot(self, fname):
+ assert assert_fname(self.offaxis_proj.save(fname)[0])
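[Editor's note] The refactor above turns the old yield-based checks into explicit unittest methods whose cases are generated by parameterized.expand. A minimal sketch of that pattern, assuming the standalone nose-parameterized package rather than whatever vendored copy yt imports here; the names FNAMES and TestSketch are hypothetical:

    import itertools
    import unittest
    from nose_parameterized import parameterized

    FNAMES = [None, 'out.png']   # hypothetical stand-in for TEST_FLNMS

    class TestSketch(unittest.TestCase):
        # One test method is generated per (dim, fname) pair at decoration time.
        @parameterized.expand(itertools.product(range(3), FNAMES))
        def test_case(self, dim, fname):
            self.assertIn(dim, (0, 1, 2))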
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -237,7 +237,7 @@
max_level=None):
r"""Draws Grids on an existing volume rendering.
- By mapping grid level to a color, drawes edges of grids on
+ By mapping grid level to a color, draws edges of grids on
a volume rendering using the camera orientation.
Parameters
https://bitbucket.org/yt_analysis/yt-3.0/commits/93dc5368d870/
Changeset: 93dc5368d870
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-28 20:47:04
Summary: This file was missed when I did my last commit.
Affected #: 1 file
diff -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 -r 93dc5368d870288a369447061324a74201beccf3 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -55,6 +55,7 @@
domain_id = 2
_con_args = ("base_region", "sfc_start", "sfc_end", "pf")
_type_name = 'octree_subset'
+ _num_zones = 2
def __init__(self, base_region, sfc_start, sfc_end, pf):
self.field_data = YTFieldData()
https://bitbucket.org/yt_analysis/yt-3.0/commits/805b5cbdf075/
Changeset: 805b5cbdf075
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-28 22:30:02
Summary: Adding tests for over_refine.
Affected #: 1 file
diff -r 93dc5368d870288a369447061324a74201beccf3 -r 805b5cbdf075af44a4e6580de2d706984f68ebaa yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -59,6 +59,35 @@
v = np.bincount(bi.astype("int64"))
yield assert_equal, v.max() <= n_ref, True
+def test_particle_overrefine():
+ np.random.seed(int(0x4d3d3d3))
+ pos = []
+ data = {}
+ bbox = []
+ for i, ax in enumerate('xyz'):
+ DW = DRE[i] - DLE[i]
+ LE = DLE[i]
+ data["particle_position_%s" % ax] = \
+ np.random.normal(0.5, scale=0.05, size=(NPART)) * DW + LE
+ bbox.append( [DLE[i], DRE[i]] )
+ bbox = np.array(bbox)
+ _attrs = ('icoords', 'fcoords', 'fwidth', 'ires')
+ for n_ref in [16, 32, 64, 512, 1024]:
+ pf1 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
+ dd1 = pf1.h.all_data()
+ v1 = dict((a, getattr(dd1, a)) for a in _attrs)
+ cv1 = dd1["CellVolumeCode"].sum(dtype="float64")
+ for over_refine in [1, 2, 3]:
+ f = 1 << (3*(over_refine-1))
+ pf2 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref,
+ over_refine_factor = over_refine)
+ dd2 = pf2.h.all_data()
+ v2 = dict((a, getattr(dd2, a)) for a in _attrs)
+ for a in sorted(v1):
+ yield assert_equal, v1[a].size * f, v2[a].size
+ cv2 = dd2["CellVolumeCode"].sum(dtype="float64")
+ yield assert_equal, cv1, cv2
+
if __name__=="__main__":
for i in test_add_particles_random():
i[0](*i[1:])
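[Editor's note] The scaling this test relies on is just zone counting: an oct with over_refine_factor oref holds (2**oref)**3 zones, so relative to the default oref=1 the cell count grows by f = 1 << (3*(oref-1)) while the summed cell volume stays fixed. A quick sanity check of that arithmetic in plain Python:

    for oref in (1, 2, 3):
        zones_per_oct = (2 ** oref) ** 3        # 8, 64, 512
        factor = 1 << (3 * (oref - 1))          # 1, 8, 64  (the f used above)
        assert zones_per_oct == 8 * factor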
https://bitbucket.org/yt_analysis/yt-3.0/commits/46f1d93ecd71/
Changeset: 46f1d93ecd71
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-29 14:06:02
Summary: Adding over_refine_factor to Tipsy.
Affected #: 1 file
diff -r 805b5cbdf075af44a4e6580de2d706984f68ebaa -r 46f1d93ecd71f9d4e8364431a0a46d1818a2158f yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -371,8 +371,9 @@
unit_base = None,
cosmology_parameters = None,
parameter_file = None,
- n_ref = 64):
+ n_ref = 64, over_refine_factor = 1):
self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
self.endian = endian
self.storage_filename = None
if domain_left_edge is None:
https://bitbucket.org/yt_analysis/yt-3.0/commits/f7704b577541/
Changeset: f7704b577541
Branch: yt-3.0
User: MatthewTurk
Date: 2013-08-29 14:30:49
Summary: Adding periodicity to r2dist and process_octree for smoothing.
Affected #: 3 files
diff -r 46f1d93ecd71f9d4e8364431a0a46d1818a2158f -r f7704b57754127235bbc5dca4b39449b50ce5feb yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -159,7 +159,7 @@
mylog.debug("Smoothing %s particles into %s Octs",
positions.shape[0], nvals[-1])
op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
- self.domain_id, self._domain_offset)
+ self.domain_id, self._domain_offset, self.pf.periodicity)
vals = op.finalize()
if vals is None: return
if isinstance(vals, list):
diff -r 46f1d93ecd71f9d4e8364431a0a46d1818a2158f -r f7704b57754127235bbc5dca4b39449b50ce5feb yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -45,7 +45,8 @@
cdef inline np.float64_t r2dist(np.float64_t ppos[3],
np.float64_t cpos[3],
- np.float64_t DW[3]):
+ np.float64_t DW[3],
+ bint periodicity[3]):
cdef int i
cdef np.float64_t r2, DR
r2 = 0.0
@@ -65,6 +66,7 @@
cdef int nfields
cdef int maxn
cdef int curn
+ cdef bint periodicity[3]
cdef np.int64_t *doffs
cdef np.int64_t *pinds
cdef np.int64_t *pcounts
diff -r 46f1d93ecd71f9d4e8364431a0a46d1818a2158f -r f7704b57754127235bbc5dca4b39449b50ce5feb yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -71,7 +71,7 @@
np.ndarray[np.float64_t, ndim=2] positions,
fields = None, int domain_id = -1,
int domain_offset = 0,
- int test_neighbors = 0):
+ periodicity = (True, True, True)):
# This will be a several-step operation.
#
# We first take all of our particles and assign them to Octs. If they
@@ -127,6 +127,7 @@
field_pointers[i] = <np.float64_t *> tarr.data
for i in range(3):
self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+ self.periodicity[i] = periodicity[i]
for i in range(positions.shape[0]):
for j in range(3):
pos[j] = positions[i, j]
@@ -229,7 +230,7 @@
if self.curn < self.maxn:
cur = &self.neighbors[self.curn]
cur.pn = pn
- cur.r2 = r2dist(ppos, cpos, self.DW)
+ cur.r2 = r2dist(ppos, cpos, self.DW, self.periodicity)
self.curn += 1
if self.curn == self.maxn:
# This time we sort it, so that future insertions will be able
@@ -238,7 +239,7 @@
Neighbor_compare)
return
# This will go (curn - 1) through 0.
- r2_c = r2dist(ppos, cpos, self.DW)
+ r2_c = r2dist(ppos, cpos, self.DW, self.periodicity)
pn_c = pn
for i in range((self.curn - 1), -1, -1):
# First we evaluate against i. If our candidate radius is greater
https://bitbucket.org/yt_analysis/yt-3.0/commits/0a57d1a9fb4c/
Changeset: 0a57d1a9fb4c
Branch: yt-3.0
User: ngoldbaum
Date: 2013-09-06 17:48:46
Summary: Merged in MatthewTurk/yt-3.0 (pull request #86)
Oct cell count generalization and initial particle smoothing operations
Affected #: 18 files
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,12 +36,12 @@
NeedsProperty, \
NeedsParameter
import yt.geometry.particle_deposit as particle_deposit
+import yt.geometry.particle_smooth as particle_smooth
from yt.funcs import *
class OctreeSubset(YTSelectionContainer):
_spatial = True
_num_ghost_zones = 0
- _num_zones = 2
_type_name = 'octree_subset'
_skip_add = True
_con_args = ('base_region', 'domain', 'pf')
@@ -49,7 +49,8 @@
_domain_offset = 0
_num_octs = -1
- def __init__(self, base_region, domain, pf):
+ def __init__(self, base_region, domain, pf, over_refine_factor = 1):
+ self._num_zones = 1 << (over_refine_factor)
self.field_data = YTFieldData()
self.field_parameters = {}
self.domain = domain
@@ -145,6 +146,28 @@
if vals is None: return
return np.asfortranarray(vals)
+ def smooth(self, positions, fields = None, method = None):
+ # Here we perform our particle deposition.
+ cls = getattr(particle_smooth, "%s_smooth" % method, None)
+ if cls is None:
+ raise YTParticleDepositionNotImplemented(method)
+ nz = self.nz
+ nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
+ if fields is None: fields = []
+ op = cls(nvals, len(fields), 64)
+ op.initialize()
+ mylog.debug("Smoothing %s particles into %s Octs",
+ positions.shape[0], nvals[-1])
+ op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+ self.domain_id, self._domain_offset, self.pf.periodicity)
+ vals = op.finalize()
+ if vals is None: return
+ if isinstance(vals, list):
+ vals = [np.asfortranarray(v) for v in vals]
+ else:
+ vals = np.asfortranarray(vals)
+ return vals
+
def select_icoords(self, dobj):
d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
@@ -206,8 +229,10 @@
_type_name = 'indexed_octree_subset'
_con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
domain_id = -1
- def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+ def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
+ over_refine_factor = 1):
# The first attempt at this will not work in parallel.
+ self._num_zones = 1 << (over_refine_factor)
self.data_files = data_files
self.field_data = YTFieldData()
self.field_parameters = {}
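[Editor's note] On the buffer shapes in the new OctreeSubset.smooth: nz is the per-edge zone count 1 << over_refine_factor, and each field is accumulated into an (nz, nz, nz, n_octs) Fortran-ordered array. A small sketch of that bookkeeping; the numbers are hypothetical, not yt API:

    over_refine_factor = 2
    nz = 1 << over_refine_factor     # 4 zones per oct edge
    n_octs = 100                     # pretend (domain_ind >= 0).sum() gave 100
    nvals = (nz, nz, nz, n_octs)     # one such buffer is allocated per field
    cells_per_oct = nz ** 3          # 64 cells contributed by each oct
    assert cells_per_oct * n_octs == nz * nz * nz * n_octs

Given the getattr dispatch ("%s_smooth" % method), a call such as smooth(positions, fields, method="simple_neighbor") would resolve to the simple_neighbor_smooth class registered at the bottom of particle_smooth.pyx further down in this changeset.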
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -55,6 +55,7 @@
domain_id = 2
_con_args = ("base_region", "sfc_start", "sfc_end", "pf")
_type_name = 'octree_subset'
+ _num_zones = 2
def __init__(self, base_region, sfc_start, sfc_end, pf):
self.field_data = YTFieldData()
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,6 +96,7 @@
class ParticleStaticOutput(StaticOutput):
_unit_base = None
+ over_refine_factor = 1
def _set_units(self):
self.units = {}
@@ -154,8 +155,10 @@
def __init__(self, filename, data_style="gadget_binary",
additional_fields = (),
- unit_base = None, n_ref = 64):
+ unit_base = None, n_ref = 64,
+ over_refine_factor = 1):
self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
@@ -188,7 +191,8 @@
self.domain_left_edge = np.zeros(3, "float64")
self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
- self.domain_dimensions = np.ones(3, "int32") * 2
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
self.periodicity = (True, True, True)
self.cosmological_simulation = 1
@@ -268,11 +272,13 @@
_particle_coordinates_name = "Coordinates"
_header_spec = None # Override so that there's no confusion
- def __init__(self, filename, data_style="OWLS", n_ref = 64):
+ def __init__(self, filename, data_style="OWLS", n_ref = 64,
+ over_refine_factor = 1):
self.storage_filename = None
- super(OWLSStaticOutput, self).__init__(filename, data_style,
- unit_base = None,
- n_ref = n_ref)
+ super(OWLSStaticOutput, self).__init__(
+ filename, data_style,
+ unit_base = None, n_ref = n_ref,
+ over_refine_factor = over_refine_factor)
def __repr__(self):
return os.path.basename(self.parameter_filename).split(".")[0]
@@ -292,7 +298,8 @@
self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
self.domain_left_edge = np.zeros(3, "float64")
self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
- self.domain_dimensions = np.ones(3, "int32") * 2
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
self.cosmological_simulation = 1
self.periodicity = (True, True, True)
self.current_redshift = hvals["Redshift"]
@@ -364,8 +371,9 @@
unit_base = None,
cosmology_parameters = None,
parameter_file = None,
- n_ref = 64):
+ n_ref = 64, over_refine_factor = 1):
self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
self.endian = endian
self.storage_filename = None
if domain_left_edge is None:
@@ -438,7 +446,8 @@
self.parameters[param] = val
self.current_time = hvals["time"]
- self.domain_dimensions = np.ones(3, "int32") * 2
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
if self.parameters.get('bPeriodic', True):
self.periodicity = (True, True, True)
else:
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -738,10 +738,11 @@
file_count = 1
filename_template = "stream_file"
n_ref = 64
+ over_refine_factor = 1
def load_particles(data, sim_unit_to_cm, bbox=None,
sim_time=0.0, periodicity=(True, True, True),
- n_ref = 64):
+ n_ref = 64, over_refine_factor = 1):
r"""Load a set of particles into yt as a
:class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
@@ -828,6 +829,7 @@
spf = StreamParticlesStaticOutput(handler)
spf.n_ref = n_ref
+ spf.over_refine_factor = over_refine_factor
spf.units["cm"] = sim_unit_to_cm
spf.units['1'] = 1.0
spf.units["unitary"] = 1.0
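[Editor's note] Usage-wise, the new keyword threads straight through load_particles; a hypothetical call mirroring the over_refine test earlier in this series (data and bbox as constructed there):

    pf = load_particles(data, 1.0, bbox=bbox, n_ref=64, over_refine_factor=2)
    dd = pf.h.all_data()
    # dd now reports 8x as many cells as the default over_refine_factor=1 load,
    # with the same total CellVolumeCode.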
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -40,6 +40,8 @@
cdef struct OctInfo:
np.float64_t left_edge[3]
np.float64_t dds[3]
+ np.int64_t ipos[3]
+ np.int32_t level
cdef struct OctAllocationContainer
cdef struct OctAllocationContainer:
@@ -49,6 +51,16 @@
OctAllocationContainer *next
Oct *my_octs
+cdef struct OctList
+
+cdef struct OctList:
+ OctList *next
+ Oct *o
+
+cdef OctList *OctList_append(OctList *list, Oct *o)
+cdef int OctList_count(OctList *list)
+cdef void OctList_delete(OctList *list)
+
cdef class OctreeContainer:
cdef OctAllocationContainer *cont
cdef OctAllocationContainer **domains
@@ -56,12 +68,13 @@
cdef oct_visitor_function *fill_func
cdef int partial_coverage
cdef int nn[3]
+ cdef np.uint8_t oref
cdef np.float64_t DLE[3], DRE[3]
cdef public np.int64_t nocts
cdef public int max_domain
cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
cdef int get_root(self, int ind[3], Oct **o)
- cdef void neighbors(self, Oct *, Oct **)
+ cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors)
cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
# This function must return the offset from global-to-local domains; i.e.,
# OctAllocationContainer.offset if such a thing exists.
@@ -71,6 +84,7 @@
OctVisitorData *data)
cdef Oct *next_root(self, int domain_id, int ind[3])
cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
+ cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
cdef class SparseOctreeContainer(OctreeContainer):
cdef OctKey *root_nodes
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -96,8 +96,10 @@
cdef class OctreeContainer:
def __init__(self, oct_domain_dimensions, domain_left_edge,
- domain_right_edge, partial_coverage = 0):
+ domain_right_edge, partial_coverage = 0,
+ over_refine = 1):
# This will just initialize the root mesh octs
+ self.oref = over_refine
self.partial_coverage = partial_coverage
cdef int i, j, k, p
for i in range(3):
@@ -120,6 +122,21 @@
for k in range(self.nn[2]):
self.root_mesh[i][j][k] = NULL
+ cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
+ cdef int i
+ data.index = 0
+ data.last = -1
+ data.global_index = -1
+ for i in range(3):
+ data.pos[i] = -1
+ data.ind[i] = -1
+ data.array = NULL
+ data.dims = 0
+ data.domain = domain_id
+ data.level = -1
+ data.oref = self.oref
+ data.nz = (1 << (data.oref*3))
+
def __dealloc__(self):
free_octs(self.cont)
if self.root_mesh == NULL: return
@@ -185,27 +202,39 @@
return 0
cdef int get_root(self, int ind[3], Oct **o):
+ cdef int i
+ for i in range(3):
+ if ind[i] < 0 or ind[i] >= self.nn[i]:
+ o[0] = NULL
+ return 1
o[0] = self.root_mesh[ind[0]][ind[1]][ind[2]]
- return 1
+ return 0
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
+ cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
+ ):
#Given a floating point position, retrieve the most
#refined oct at that time
- cdef int ind[3]
+ cdef int ind[3], level
+ cdef np.int64_t ipos[3]
cdef np.float64_t dds[3], cp[3], pp[3]
cdef Oct *cur, *next
+ cdef int i
cur = next = NULL
- cdef int i
+ level = -1
for i in range(3):
dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+ ipos[i] = 0
self.get_root(ind, &next)
# We want to stop recursing when there's nowhere else to go
while next != NULL:
+ level += 1
+ for i in range(3):
+ ipos[i] = (ipos[i] << 1) + ind[i]
cur = next
for i in range(3):
dds[i] = dds[i] / 2.0
@@ -227,18 +256,22 @@
cp[i] -= dds[i]/2.0 # Now centered
else:
cp[i] += dds[i]/2.0
- # We don't need to change dds[i] as it has been halved from the
- # oct width, thus making it already the cell width
- oinfo.dds[i] = dds[i] # Cell width
+ # We don't normally need to change dds[i] as it has been halved
+ # from the oct width, thus making it already the cell width.
+ # But, for some cases where the oref != 1, this needs to be
+ # changed.
+ oinfo.dds[i] = dds[i] / (1 << (self.oref-1)) # Cell width
oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
+ oinfo.ipos[i] = ipos[i]
+ oinfo.level = level
return cur
def domain_identify(self, SelectorObject selector):
cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
domain_mask = np.zeros(self.max_domain, dtype="uint8")
cdef OctVisitorData data
+ self.setup_data(&data)
data.array = domain_mask.data
- data.domain = -1
self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
cdef int i
domain_ids = []
@@ -250,99 +283,69 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef void neighbors(self, Oct* o, Oct* neighbors[27]):
- #Get 3x3x3 neighbors, although the 1,1,1 oct is the
- #central one.
- #Return an array of Octs
- cdef np.int64_t curopos[3]
- cdef np.int64_t curnpos[3]
- cdef np.int64_t npos[3]
- cdef int i, j, k, ni, nj, nk, ind[3], nn, dl, skip
- cdef np.float64_t dds[3], cp[3], pp[3]
+ cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors):
cdef Oct* candidate
- for i in range(27): neighbors[i] = NULL
nn = 0
- raise RuntimeError
- #for ni in range(3):
- # for nj in range(3):
- # for nk in range(3):
- # if ni == nj == nk == 1:
- # neighbors[nn] = o
- # nn += 1
- # continue
- # npos[0] = o.pos[0] + (ni - 1)
- # npos[1] = o.pos[1] + (nj - 1)
- # npos[2] = o.pos[2] + (nk - 1)
- # for i in range(3):
- # # Periodicity
- # if npos[i] == -1:
- # npos[i] = (self.nn[i] << o.level) - 1
- # elif npos[i] == (self.nn[i] << o.level):
- # npos[i] = 0
- # curopos[i] = o.pos[i]
- # curnpos[i] = npos[i]
- # # Now we have our neighbor position and a safe place to
- # # keep it. curnpos will be the root index of the neighbor
- # # at a given level, and npos will be constant. curopos is
- # # the candidate root at a level.
- # candidate = o
- # while candidate != NULL:
- # if ((curopos[0] == curnpos[0]) and
- # (curopos[1] == curnpos[1]) and
- # (curopos[2] == curnpos[2])):
- # break
- # # This one doesn't meet it, so we pop up a level.
- # # First we update our positions, then we update our
- # # candidate.
- # for i in range(3):
- # # We strip a digit off the right
- # curopos[i] = (curopos[i] >> 1)
- # curnpos[i] = (curnpos[i] >> 1)
- # # Now we update to the candidate's parent, which should
- # # have a matching position to curopos[]
- # # TODO: This has not survived the transition to
- # # mostly-stateless Octs!
- # raise RuntimeError
- # candidate = candidate.parent
- # if candidate == NULL:
- # # Worst case scenario
- # for i in range(3):
- # ind[i] = (npos[i] >> (o.level))
- # candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
- # # Now we have the common root, which may be NULL
- # while candidate.level < o.level:
- # dl = o.level - (candidate.level + 1)
- # for i in range(3):
- # ind[i] = (npos[i] >> dl) & 1
- # if candidate.children[cind(ind[0],ind[1],ind[2])] \
- # == NULL:
- # break
- # candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
- # neighbors[nn] = candidate
- # nn += 1
+ # We are going to do a brute-force search here.
+ # This is not the most efficient -- in fact, it's relatively bad. But
+ # we will attempt to improve it in a future iteration, where we will
+ # grow a stack of parent Octs.
+ # Note that in the first iteration, we will just find the up-to-27
+ # neighbors, including the main oct.
+ cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+ cdef OctList *olist, *my_list
+ my_list = olist = NULL
+ cdef Oct *cand
+ cdef np.int64_t npos[3], ndim[3]
+ # Now we get our boundaries for this level, so that we can wrap around
+ # if need be.
+ # ndim is the oct dimensions of the level, not the cell dimensions.
+ for i in range(3):
+ ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+ ndim[i] = (ndim[i] >> self.oref)
+ for i in range(3):
+ npos[0] = (oi.ipos[0] + (1 - i))
+ if npos[0] < 0: npos[0] += ndim[0]
+ if npos[0] >= ndim[0]: npos[0] -= ndim[0]
+ for j in range(3):
+ npos[1] = (oi.ipos[1] + (1 - j))
+ if npos[1] < 0: npos[1] += ndim[1]
+ if npos[1] >= ndim[1]: npos[1] -= ndim[1]
+ for k in range(3):
+ npos[2] = (oi.ipos[2] + (1 - k))
+ if npos[2] < 0: npos[2] += ndim[2]
+ if npos[2] >= ndim[2]: npos[2] -= ndim[2]
+ # Now we have our npos, which we just need to find.
+ # Level 0 gets bootstrapped
+ for n in range(3):
+ ind[n] = ((npos[n] >> (oi.level)) & 1)
+ cand = NULL
+ self.get_root(ind, &cand)
+ # We should not get a NULL if we handle periodicity
+ # correctly, but we might.
+ if cand == NULL: continue
+ for level in range(1, oi.level+1):
+ if cand.children == NULL: break
+ for n in range(3):
+ ind[n] = (npos[n] >> (oi.level - (level))) & 1
+ ii = cind(ind[0],ind[1],ind[2])
+ if cand.children[ii] == NULL: break
+ cand = cand.children[ii]
+ if cand != NULL:
+ nfound += 1
+ olist = OctList_append(olist, cand)
+ if my_list == NULL: my_list = olist
- @cython.boundscheck(False)
- @cython.wraparound(False)
- @cython.cdivision(True)
- def get_neighbor_boundaries(self, oppos):
- cdef int i, ii
- cdef np.float64_t ppos[3]
- for i in range(3):
- ppos[i] = oppos[i]
- cdef Oct *main = self.get(ppos)
- cdef Oct* neighbors[27]
- self.neighbors(main, neighbors)
- cdef np.ndarray[np.float64_t, ndim=2] bounds
- cdef np.float64_t corner[3], size[3]
- bounds = np.zeros((27,6), dtype="float64")
- tnp = 0
- raise RuntimeError
- for i in range(27):
- self.oct_bounds(neighbors[i], corner, size)
- for ii in range(3):
- bounds[i, ii] = corner[ii]
- bounds[i, 3+ii] = size[ii]
- return bounds
+ olist = my_list
+ cdef int noct = OctList_count(olist)
+ cdef Oct **neighbors
+ neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
+ for i in range(noct):
+ neighbors[i] = olist.o
+ olist = olist.next
+ OctList_delete(my_list)
+ nneighbors[0] = noct
+ return neighbors
@cython.boundscheck(False)
@cython.wraparound(False)
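[Editor's note] The wrap-around in the new neighbors() is a per-axis periodic shift of the oct index at the oct's own level, where ndim is the number of octs across the domain at that level. A minimal sketch of that one step:

    def wrap_index(ipos, offset, ndim):
        # offset is -1, 0 or +1 along one axis
        npos = ipos + offset
        if npos < 0:
            npos += ndim
        if npos >= ndim:
            npos -= ndim
        return npos

    assert wrap_index(0, -1, 8) == 7
    assert wrap_index(7, +1, 8) == 0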
@@ -352,11 +355,10 @@
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
cdef np.ndarray[np.uint8_t, ndim=1] coords
- coords = np.zeros((num_octs * 8), dtype="uint8")
cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
+ coords = np.zeros((num_octs * data.nz), dtype="uint8")
data.array = <void *> coords.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
return coords.astype("bool")
@@ -367,12 +369,11 @@
int domain_id = -1):
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
+ cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
cdef np.ndarray[np.int64_t, ndim=2] coords
- coords = np.empty((num_octs * 8, 3), dtype="int64")
- cdef OctVisitorData data
+ coords = np.empty((num_octs * data.nz, 3), dtype="int64")
data.array = <void *> coords.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
return coords
@@ -383,13 +384,12 @@
int domain_id = -1):
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
+ cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
#Return the 'resolution' of each cell; ie the level
cdef np.ndarray[np.int64_t, ndim=1] res
- res = np.empty(num_octs * 8, dtype="int64")
- cdef OctVisitorData data
+ res = np.empty(num_octs * data.nz, dtype="int64")
data.array = <void *> res.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
return res
@@ -400,12 +400,11 @@
int domain_id = -1):
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
+ cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
cdef np.ndarray[np.float64_t, ndim=2] fwidth
- fwidth = np.empty((num_octs * 8, 3), dtype="float64")
- cdef OctVisitorData data
+ fwidth = np.empty((num_octs * data.nz, 3), dtype="float64")
data.array = <void *> fwidth.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
cdef np.float64_t base_dx
for i in range(3):
@@ -420,13 +419,12 @@
int domain_id = -1):
if num_octs == -1:
num_octs = selector.count_octs(self, domain_id)
+ cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
#Return the floating point unitary position of every cell
cdef np.ndarray[np.float64_t, ndim=2] coords
- coords = np.empty((num_octs * 8, 3), dtype="float64")
- cdef OctVisitorData data
+ coords = np.empty((num_octs * data.nz, 3), dtype="float64")
data.array = <void *> coords.data
- data.index = 0
- data.domain = domain_id
self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
cdef int i
cdef np.float64_t base_dx
@@ -456,8 +454,8 @@
else:
dest = np.zeros(num_cells, dtype=source.dtype, order='C')
cdef OctVisitorData data
+ self.setup_data(&data, domain_id)
data.index = offset
- data.domain = domain_id
# We only need this so we can continue calculating the offset
data.dims = dims
cdef void *p[2]
@@ -474,14 +472,16 @@
else:
raise NotImplementedError
self.visit_all_octs(selector, func, &data)
- if (data.global_index + 1) * 8 * data.dims > source.size:
+ if (data.global_index + 1) * data.nz * data.dims > source.size:
print "GLOBAL INDEX RAN AHEAD.",
- print (data.global_index + 1) * 8 * data.dims - source.size
+ print (data.global_index + 1) * data.nz * data.dims - source.size
print dest.size, source.size, num_cells
raise RuntimeError
if data.index > dest.size:
print "DEST INDEX RAN AHEAD.",
print data.index - dest.size
+ print (data.global_index + 1) * data.nz * data.dims, source.size
+ print num_cells
raise RuntimeError
if num_cells >= 0:
return dest
@@ -492,10 +492,8 @@
# Here's where we grab the masked items.
ind = np.zeros(self.nocts, 'int64') - 1
cdef OctVisitorData data
- data.domain = domain_id
+ self.setup_data(&data, domain_id)
data.array = ind.data
- data.index = 0
- data.last = -1
self.visit_all_octs(selector, oct_visitors.index_octs, &data)
return ind
@@ -578,6 +576,7 @@
if parent.children != NULL:
next = parent.children[cind(ind[0],ind[1],ind[2])]
else:
+ # This *8 does NOT need to be made generic.
parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
for i in range(8):
parent.children[i] = NULL
@@ -607,13 +606,12 @@
file_inds[i] = -1
cell_inds[i] = 9
cdef OctVisitorData data
- data.index = 0
+ self.setup_data(&data, domain_id)
cdef void *p[3]
p[0] = levels.data
p[1] = file_inds.data
p[2] = cell_inds.data
data.array = p
- data.domain = domain_id
self.visit_all_octs(selector, self.fill_func, &data)
return levels, cell_inds, file_inds
@@ -641,10 +639,9 @@
def finalize(self):
cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
cdef OctVisitorData data
- data.index = 0
- data.domain = 1
+ self.setup_data(&data, 1)
self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
- assert ((data.global_index+1)*8 == data.index)
+ assert ((data.global_index+1)*data.nz == data.index)
cdef int root_node_compare(void *a, void *b) nogil:
cdef OctKey *ao, *bo
@@ -659,9 +656,11 @@
cdef class SparseOctreeContainer(OctreeContainer):
- def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
+ def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge,
+ over_refine = 1):
cdef int i, j, k, p
self.partial_coverage = 1
+ self.oref = over_refine
for i in range(3):
self.nn[i] = domain_dimensions[i]
self.max_domain = -1
@@ -807,3 +806,33 @@
dest[local_filled + offset] = source[ox,oy,oz]
local_filled += 1
return local_filled
+
+cdef OctList *OctList_append(OctList *olist, Oct *o):
+ cdef OctList *this = olist
+ if this == NULL:
+ this = <OctList *> malloc(sizeof(OctList))
+ this.next = NULL
+ this.o = o
+ return this
+ while this.next != NULL:
+ this = this.next
+ this.next = <OctList*> malloc(sizeof(OctList))
+ this = this.next
+ this.o = o
+ this.next = NULL
+ return this
+
+cdef int OctList_count(OctList *olist):
+ cdef OctList *this = olist
+ cdef int i = 0 # Count the list
+ while this != NULL:
+ i += 1
+ this = this.next
+ return i
+
+cdef void OctList_delete(OctList *olist):
+ cdef OctList *next, *this = olist
+ while this != NULL:
+ next = this.next
+ free(this)
+ this = next
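[Editor's note] The OctList helpers added at the bottom are a plain singly linked list used to accumulate candidate neighbor octs before neighbors() copies them into a flat Oct** array. A Python analogue of the append and count helpers, as a sketch only (delete has no analogue since Python garbage-collects the nodes):

    class OctListNode:
        def __init__(self, o):
            self.o = o
            self.next = None

    def octlist_append(olist, o):
        # Returns the newly appended node; the caller keeps a separate head
        # pointer, just as neighbors() keeps my_list alongside olist.
        node = OctListNode(o)
        if olist is None:
            return node
        this = olist
        while this.next is not None:   # walk to the current tail
            this = this.next
        this.next = node
        return node

    def octlist_count(head):
        n = 0
        while head is not None:
            n += 1
            head = head.next
        return n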
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -3,7 +3,7 @@
Author: Matthew Turk <matthewturk at gmail.com>
Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
License:
Copyright (C) 2013 Matthew Turk. All Rights Reserved.
@@ -43,6 +43,10 @@
int dims
np.int32_t domain
np.int8_t level
+ np.int8_t oref # This is the level of overref. 1 => 8 zones, 2 => 64, etc.
+ # To calculate nzones, 1 << (oref * 3)
+ np.int32_t nz
+
ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
np.uint8_t selected)
@@ -64,10 +68,13 @@
cdef oct_visitor_function fill_file_indices_rind
cdef inline int cind(int i, int j, int k):
+ # THIS ONLY WORKS FOR CHILDREN. It is not general for zones.
return (((i*2)+j)*2+k)
cdef inline int oind(OctVisitorData *data):
- return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
+ cdef int d = (1 << data.oref)
+ return (((data.ind[0]*d)+data.ind[1])*d+data.ind[2])
cdef inline int rind(OctVisitorData *data):
- return (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])
+ cdef int d = (1 << data.oref)
+ return (((data.ind[2]*d)+data.ind[1])*d+data.ind[0])
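[Editor's note] The oref field generalizes the hard-coded 8 zones per oct: with d = 1 << oref zones per edge an oct holds 1 << (3*oref) zones, and oind/rind flatten a zone's (i, j, k) into that range. A small check of the arithmetic in plain Python:

    def oind(ind, oref):
        d = 1 << oref
        return (ind[0] * d + ind[1]) * d + ind[2]

    assert (1 << (1 * 3)) == 8 and (1 << (2 * 3)) == 64
    assert oind((1, 0, 1), oref=1) == 5      # within [0, 8)
    assert oind((3, 2, 1), oref=2) == 57     # within [0, 64)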
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,7 +38,7 @@
if selected == 0: return
cdef int i
# There are this many records between "octs"
- cdef np.int64_t index = (data.global_index * 8)*data.dims
+ cdef np.int64_t index = (data.global_index * data.nz)*data.dims
cdef np.float64_t **p = <np.float64_t**> data.array
index += oind(data)*data.dims
for i in range(data.dims):
@@ -50,7 +50,7 @@
# "last" here tells us the dimensionality of the array.
if selected == 0: return
cdef int i
- cdef np.int64_t index = (data.global_index * 8)*data.dims
+ cdef np.int64_t index = (data.global_index * data.nz)*data.dims
cdef np.int64_t **p = <np.int64_t**> data.array
index += oind(data)*data.dims
for i in range(data.dims):
@@ -75,7 +75,7 @@
if data.last != o.domain_ind:
data.last = o.domain_ind
data.index += 1
- cdef np.int64_t index = data.index * 8
+ cdef np.int64_t index = data.index * data.nz
index += oind(data)
arr[index] = 1
@@ -83,7 +83,7 @@
if selected == 0: return
cdef int i
cdef np.uint8_t *arr = <np.uint8_t *> data.array
- cdef np.int64_t index = data.global_index * 8
+ cdef np.int64_t index = data.global_index * data.nz
index += oind(data)
arr[index] = 1
@@ -102,7 +102,7 @@
cdef np.int64_t *coords = <np.int64_t*> data.array
cdef int i
for i in range(3):
- coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
+ coords[data.index * 3 + i] = (data.pos[i] << data.oref) + data.ind[i]
data.index += 1
cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -120,9 +120,9 @@
cdef np.float64_t *fcoords = <np.float64_t*> data.array
cdef int i
cdef np.float64_t c, dx
- dx = 1.0 / (2 << data.level)
+ dx = 1.0 / ((1 << data.oref) << data.level)
for i in range(3):
- c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i])
+ c = <np.float64_t> ((data.pos[i] << data.oref ) + data.ind[i])
fcoords[data.index * 3 + i] = (c + 0.5) * dx
data.index += 1
@@ -135,7 +135,7 @@
cdef np.float64_t *fwidth = <np.float64_t*> data.array
cdef int i
cdef np.float64_t dx
- dx = 1.0 / (2 << data.level)
+ dx = 1.0 / ((1 << data.oref) << data.level)
for i in range(3):
fwidth[data.index * 3 + i] = dx
data.index += 1
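[Editor's note] The dx change in fcoords_octs/fwidth_octs follows directly from oref: measured in units where a root oct has width 1 (the caller rescales by base_dx), a cell at a given level has width 1 / ((1 << oref) << level). For example:

    def cell_width(level, oref):
        return 1.0 / ((1 << oref) << level)

    assert cell_width(0, 1) == 0.5       # old behaviour: 2 zones per root oct
    assert cell_width(2, 1) == 0.125
    assert cell_width(2, 2) == 0.0625    # twice as fine at the same level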
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -5,7 +5,7 @@
Affiliation: UC Santa Cruz
Author: Matthew Turk <matthewturk at gmail.com>
Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
License:
Copyright (C) 2013 Matthew Turk. All Rights Reserved.
@@ -32,7 +32,7 @@
from libc.math cimport sqrt
from fp_utils cimport *
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
cdef extern from "alloca.h":
void *alloca(int)
@@ -62,7 +62,6 @@
cdef class ParticleDepositOperation:
# We assume each will allocate and define their own temporary storage
cdef public object nvals
- cdef public int bad_indices
cdef public int update_values
cdef void process(self, int dim[3], np.float64_t left_edge[3],
np.float64_t dds[3], np.int64_t offset,
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -54,7 +54,6 @@
fields = None, int domain_id = -1,
int domain_offset = 0):
cdef int nf, i, j
- self.bad_indices = 0
if fields is None:
fields = []
nf = len(fields)
@@ -66,7 +65,8 @@
tarr = fields[i]
field_pointers[i] = <np.float64_t *> tarr.data
cdef int dims[3]
- dims[0] = dims[1] = dims[2] = 2
+ dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+ cdef int nz = dims[0] * dims[1] * dims[2]
cdef OctInfo oi
cdef np.int64_t offset, moff
cdef Oct *oct
@@ -98,7 +98,7 @@
if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
continue
# Note that this has to be our local index, not our in-file index.
- offset = dom_ind[oct.domain_ind - moff] * 8
+ offset = dom_ind[oct.domain_ind - moff] * nz
if offset < 0: continue
# Check that we found the oct ...
self.process(dims, oi.left_edge, oi.dds,
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -86,7 +86,8 @@
sum(d.total_particles.values()) for d in self.data_files)
pf = self.parameter_file
self.oct_handler = ParticleOctreeContainer(
- [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
+ [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge,
+ over_refine = pf.over_refine_factor)
self.oct_handler.n_ref = pf.n_ref
mylog.info("Allocating for %0.3e particles", self.total_particles)
# No more than 256^3 in the region finder.
@@ -147,8 +148,9 @@
data_files = [self.data_files[i] for i in
self.regions.identify_data_files(dobj.selector)]
base_region = getattr(dobj, "base_region", dobj)
+ oref = self.parameter_file.over_refine_factor
subset = [ParticleOctreeSubset(base_region, data_files,
- self.parameter_file)]
+ self.parameter_file, over_refine_factor = oref)]
dobj._chunk_info = subset
dobj._current_chunk = list(self._chunk_all(dobj))[0]
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -205,6 +205,7 @@
cdef int i, j, k, m, n, ind[3]
cdef Oct *noct
cdef np.uint64_t prefix1, prefix2
+ # TODO: This does not need to be changed.
o.children = <Oct **> malloc(sizeof(Oct *)*8)
for i in range(2):
for j in range(2):
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_smooth.pxd
--- /dev/null
+++ b/yt/geometry/particle_smooth.pxd
@@ -0,0 +1,94 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+ Copyright (C) 2013 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, qsort
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .particle_deposit cimport sph_kernel, gind
+
+cdef extern from "alloca.h":
+ void *alloca(int)
+
+cdef struct NeighborList
+cdef struct NeighborList:
+ np.int64_t pn # Particle number
+ np.float64_t r2 # radius**2
+
+cdef inline np.float64_t r2dist(np.float64_t ppos[3],
+ np.float64_t cpos[3],
+ np.float64_t DW[3],
+ bint periodicity[3]):
+ cdef int i
+ cdef np.float64_t r2, DR
+ r2 = 0.0
+ for i in range(3):
+ DR = (ppos[i] - cpos[i])
+ if (DR > DW[i]/2.0):
+ DR -= DW[i]/2.0
+ elif (DR < -DW[i]/2.0):
+ DR += DW[i]/2.0
+ r2 += DR * DR
+ return r2
+
+cdef class ParticleSmoothOperation:
+ # We assume each will allocate and define their own temporary storage
+ cdef public object nvals
+ cdef np.float64_t DW[3]
+ cdef int nfields
+ cdef int maxn
+ cdef int curn
+ cdef bint periodicity[3]
+ cdef np.int64_t *doffs
+ cdef np.int64_t *pinds
+ cdef np.int64_t *pcounts
+ cdef np.float64_t *ppos
+ # Note that we are preallocating here, so this is *not* threadsafe.
+ cdef NeighborList *neighbors
+ cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+ np.float64_t dds[3], np.float64_t *ppos,
+ np.float64_t **fields, np.int64_t nneighbors,
+ np.int64_t *nind, np.int64_t *doffs,
+ np.int64_t *pinds, np.int64_t *pcounts,
+ np.int64_t offset)
+ cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+ np.float64_t cpos[3])
+ cdef void neighbor_reset(self)
+ cdef void neighbor_find(self,
+ np.int64_t nneighbors,
+ np.int64_t *nind,
+ np.int64_t *doffs,
+ np.int64_t *pcounts,
+ np.int64_t *pinds,
+ np.float64_t *ppos,
+ np.float64_t cpos[3])
+ cdef void process(self, np.int64_t offset, int i, int j, int k,
+ int dim[3], np.float64_t cpos[3], np.float64_t **fields)
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_smooth.pyx
--- /dev/null
+++ b/yt/geometry/particle_smooth.pyx
@@ -0,0 +1,360 @@
+"""
+Particle smoothing in cells
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2013 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, realloc
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, \
+ OctreeContainer, OctInfo
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+ cdef NeighborList *n1, *n2
+ n1 = <NeighborList *> on1
+ n2 = <NeighborList *> on2
+ # Note that we set this up so that "greatest" evaluates to the *end* of the
+ # list, so we can do standard radius comparisons.
+ if n1.r2 < n2.r2:
+ return -1
+ elif n1.r2 == n2.r2:
+ return 0
+ else:
+ return 1
+
+cdef class ParticleSmoothOperation:
+ def __init__(self, nvals, nfields, max_neighbors):
+ # This is the set of cells, in grids, blocks or octs, we are handling.
+ cdef int i
+ self.nvals = nvals
+ self.nfields = nfields
+ self.maxn = max_neighbors
+ self.neighbors = <NeighborList *> malloc(
+ sizeof(NeighborList) * self.maxn)
+ self.neighbor_reset()
+
+ def initialize(self, *args):
+ raise NotImplementedError
+
+ def finalize(self, *args):
+ raise NotImplementedError
+
+ @cython.cdivision(True)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def process_octree(self, OctreeContainer octree,
+ np.ndarray[np.int64_t, ndim=1] dom_ind,
+ np.ndarray[np.float64_t, ndim=2] positions,
+ fields = None, int domain_id = -1,
+ int domain_offset = 0,
+ periodicity = (True, True, True)):
+ # This will be a several-step operation.
+ #
+ # We first take all of our particles and assign them to Octs. If they
+ # are not in an Oct, we will assume they are out of bounds. Note that
+ # this means that if we have loaded neighbor particles for which an Oct
+ # does not exist, we are going to be discarding them -- so sparse
+ # octrees will need to ensure that neighbor octs *exist*. Particles
+ # will be assigned in a new NumPy array. Note that this incurs
+ # overhead, but reduces complexity as we will now be able to use
+ # argsort.
+ #
+ # After the particles have been assigned to Octs, we process each Oct
+ # individually. We will do this by calling "get" for the *first*
+ # particle in each set of Octs in the sorted list. After this, we get
+ # neighbors for each Oct.
+ #
+ # Now, with the set of neighbors (and thus their indices) we allocate
+ # an array of particles and their fields, fill these in, and call our
+ # process function.
+ #
+ # This is not terribly efficient -- for starters, the neighbor function
+ # is not the most efficient yet. We will also need to handle some
+ # mechanism of an expandable array for holding pointers to Octs, so
+ # that we can deal with >27 neighbors. As I write this comment,
+ # neighbors() only returns 27 neighbors.
+ cdef int nf, i, j, dims[3], n
+ cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
+ cdef int nsize = 0
+ cdef np.int64_t *nind = NULL
+ cdef OctInfo oi
+ cdef Oct *oct, **neighbors = NULL
+ cdef np.int64_t nneighbors, numpart, offset, moff, local_ind
+ cdef np.int64_t *doffs, *pinds, *pcounts, poff
+ cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+ cdef np.ndarray[np.float64_t, ndim=1] tarr
+ dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+ cdef int nz = dims[0] * dims[1] * dims[2]
+ numpart = positions.shape[0]
+ # pcount is the number of particles per oct.
+ pcount = np.zeros_like(dom_ind)
+ # doff is the offset to a given oct in the sorted particles.
+ doff = np.zeros_like(dom_ind) - 1
+ moff = octree.get_domain_offset(domain_id + domain_offset)
+ # pdoms points particles at their octs. So the value in this array, for
+ # a given index, is the local oct index.
+ pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+ nf = len(fields)
+ if fields is None:
+ fields = []
+ field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+ for i in range(nf):
+ tarr = fields[i]
+ field_pointers[i] = <np.float64_t *> tarr.data
+ for i in range(3):
+ self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+ self.periodicity[i] = periodicity[i]
+ for i in range(positions.shape[0]):
+ for j in range(3):
+ pos[j] = positions[i, j]
+ oct = octree.get(pos)
+ if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+ continue
+ # Note that this has to be our local index, not our in-file index.
+ # This is the particle count, which we'll use once we have sorted
+ # the particles to calculate the offsets into each oct's particles.
+ offset = oct.domain_ind - moff
+ pcount[offset] += 1
+ pdoms[i] = offset # We store the *actual* offset.
+ # Now we have oct assignments. Let's sort them.
+ # Note that what we will be providing to our processing functions will
+ # actually be indirectly-sorted fields. This preserves memory at the
+ # expense of additional pointer lookups.
+ pind = np.argsort(pdoms)
+ pind = np.asarray(pind, dtype='int64', order='C')
+ # So what this means is that we now have all the oct-0 particle indices
+ # in order, then the oct-1, etc etc.
+ # This now gives us the indices to the particles for each domain.
+ for i in range(positions.shape[0]):
+ # This value, poff, is the index of the particle in the *unsorted*
+ # arrays.
+ poff = pind[i]
+ offset = pdoms[poff]
+ # If we have yet to assign the starting index to this oct, we do so
+ # now.
+ if doff[offset] < 0: doff[offset] = i
+ # Now doff is full of offsets to the first entry in the pind that
+ # refers to that oct's particles.
+ ppos = <np.float64_t *> positions.data
+ doffs = <np.int64_t*> doff.data
+ pinds = <np.int64_t*> pind.data
+ pcounts = <np.int64_t*> pcount.data
+ nsize = 27
+ nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
+ for i in range(doff.shape[0]):
+ # Nothing assigned.
+ if doff[i] < 0: continue
+ # The first particle assigned to this oct should be the one we
+ # want.
+ poff = pind[doff[i]]
+ for j in range(3):
+ pos[j] = positions[poff, j]
+ oct = octree.get(pos, &oi)
+ if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+ continue
+ offset = dom_ind[oct.domain_ind - moff] * nz
+ neighbors = octree.neighbors(&oi, &nneighbors)
+ # Now we have all our neighbors. And, we should be set for what
+ # else we need to do.
+ if nneighbors > nsize:
+ nind = <np.int64_t *> realloc(
+ nind, sizeof(np.int64_t)*nneighbors)
+ nsize = nneighbors
+ for j in range(nneighbors):
+ nind[j] = neighbors[j].domain_ind - moff
+ for n in range(j):
+ if nind[j] == nind[n]:
+ nind[j] = -1
+ break
+ # This is allocated by the neighbors function, so we deallocate it.
+ free(neighbors)
+ self.neighbor_process(dims, oi.left_edge, oi.dds,
+ ppos, field_pointers, nneighbors, nind, doffs,
+ pinds, pcounts, offset)
+ if nind != NULL:
+ free(nind)
+
+ @cython.cdivision(True)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def process_grid(self, gobj,
+ np.ndarray[np.float64_t, ndim=2] positions,
+ fields = None):
+ raise NotImplementedError
+
+ cdef void process(self, np.int64_t offset, int i, int j, int k,
+ int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+ raise NotImplementedError
+
+ cdef void neighbor_reset(self):
+ self.curn = 0
+ for i in range(self.maxn):
+ self.neighbors[i].pn = -1
+ self.neighbors[i].r2 = 1e300
+
+ cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+ np.float64_t cpos[3]):
+ cdef NeighborList *cur
+ cdef int i
+ # _c means candidate (what we're evaluating)
+ # _o means other (the item in the list)
+ cdef np.float64_t r2_c, r2_o
+ cdef np.int64_t pn_c, pn_o
+ # If we're less than the maximum number of neighbors, we simply append.
+ # After that, we will sort, and then only compare against the rightmost
+ # entries.
+ if self.curn < self.maxn:
+ cur = &self.neighbors[self.curn]
+ cur.pn = pn
+ cur.r2 = r2dist(ppos, cpos, self.DW, self.periodicity)
+ self.curn += 1
+ if self.curn == self.maxn:
+ # This time we sort it, so that future insertions will be able
+ # to be done in order.
+ qsort(self.neighbors, self.curn, sizeof(NeighborList),
+ Neighbor_compare)
+ return
+ # This will go (curn - 1) through 0.
+ r2_c = r2dist(ppos, cpos, self.DW, self.periodicity)
+ pn_c = pn
+ for i in range((self.curn - 1), -1, -1):
+ # First we evaluate against i. If our candidate radius is greater
+ # than the one we're inspecting, we quit.
+ cur = &self.neighbors[i]
+ r2_o = cur.r2
+ pn_o = cur.pn
+ if r2_c >= r2_o:
+ break
+ # Now we know we need to swap them. First we assign our candidate
+ # values to cur.
+ cur.r2 = r2_c
+ cur.pn = pn_c
+ if i + 1 >= self.maxn:
+ continue # No swapping
+ cur = &self.neighbors[i + 1]
+ cur.r2 = r2_o
+ cur.pn = pn_o
+ # At this point, we've evaluated all the particles and we should have a
+ # sorted set of values. So, we're done.
+
+ cdef void neighbor_find(self,
+ np.int64_t nneighbors,
+ np.int64_t *nind,
+ np.int64_t *doffs,
+ np.int64_t *pcounts,
+ np.int64_t *pinds,
+ np.float64_t *ppos,
+ np.float64_t cpos[3]
+ ):
+ # We are now given the number of neighbors, the indices into the
+ # domains for them, and the number of particles for each.
+ cdef int ni, i, j
+ cdef np.int64_t offset, pn, pc
+ cdef np.float64_t pos[3]
+ self.neighbor_reset()
+ for ni in range(nneighbors):
+ if nind[ni] == -1: continue
+ offset = doffs[nind[ni]]
+ pc = pcounts[nind[ni]]
+ for i in range(pc):
+ pn = pinds[offset + i]
+ for j in range(3):
+ pos[j] = ppos[pn * 3 + j]
+ self.neighbor_eval(pn, pos, cpos)
+
+ cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+ np.float64_t dds[3], np.float64_t *ppos,
+ np.float64_t **fields, np.int64_t nneighbors,
+ np.int64_t *nind, np.int64_t *doffs,
+ np.int64_t *pinds, np.int64_t *pcounts,
+ np.int64_t offset):
+ # Note that we assume that fields[0] == smoothing length in the native
+ # units supplied. We can now iterate over every cell in the block and
+ # every particle to find the nearest. We will use a priority heap.
+ cdef int i, j, k
+ cdef np.float64_t cpos[3]
+ cpos[0] = left_edge[0] + 0.5*dds[0]
+ for i in range(dim[0]):
+ cpos[1] = left_edge[1] + 0.5*dds[1]
+ for j in range(dim[1]):
+ cpos[2] = left_edge[2] + 0.5*dds[2]
+ for k in range(dim[2]):
+ self.neighbor_find(nneighbors, nind, doffs, pcounts,
+ pinds, ppos, cpos)
+ # Now we have all our neighbors in our neighbor list.
+ self.process(offset, i, j, k, dim, cpos, fields)
+ cpos[2] += dds[2]
+ cpos[1] += dds[1]
+ cpos[0] += dds[0]
+
+
+cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
+ cdef np.float64_t **fp
+ cdef public object vals
+ def initialize(self):
+ cdef int i
+ if self.nfields < 4:
+ # We need at least two fields, the smoothing length and the
+ # field to smooth, to operate.
+ raise RuntimeError
+ cdef np.ndarray tarr
+ self.fp = <np.float64_t **> malloc(
+ sizeof(np.float64_t *) * self.nfields)
+ self.vals = []
+ for i in range(self.nfields):
+ tarr = np.zeros(self.nvals, dtype="float64", order="F")
+ self.vals.append(tarr)
+ self.fp[i] = <np.float64_t *> tarr.data
+
+ def finalize(self):
+ free(self.fp)
+ return self.vals
+
+ @cython.cdivision(True)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cdef void process(self, np.int64_t offset, int i, int j, int k,
+ int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+ # We have our i, j, k for our cell, as well as the cell position.
+ # We also have a list of neighboring particles with particle numbers.
+ cdef int n, fi
+ cdef np.float64_t weight, r2, val
+ cdef np.int64_t pn
+ for n in range(self.curn):
+ # No normalization for the moment.
+ # fields[0] is the smoothing length.
+ r2 = self.neighbors[n].r2
+ pn = self.neighbors[n].pn
+ # Smoothing kernel weight function
+ weight = sph_kernel(sqrt(r2) / fields[0][pn])
+ # Mass of the particle times the value divided by the Density
+ for fi in range(self.nfields - 3):
+ val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
+ self.fp[fi + 3][gind(i,j,k,dim) + offset] = val * weight
+ return
+
+simple_neighbor_smooth = SimpleNeighborSmooth
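[Editor's note] The long comment at the top of process_octree describes the bookkeeping: each particle gets a local oct index (pdoms), an argsort groups particles by oct (pind), pcount holds per-oct counts, and doff records where each oct's run begins in the sorted order. A NumPy sketch of that scheme with made-up data:

    import numpy as np

    pdoms = np.array([2, 0, 2, 1, 0, 2])   # hypothetical local oct index per particle
    pcount = np.bincount(pdoms)            # particles per oct -> [2, 1, 3]
    pind = np.argsort(pdoms)               # particle ids grouped by oct
    doff = np.full(pcount.size, -1)
    for i, p in enumerate(pind):
        if doff[pdoms[p]] < 0:
            doff[pdoms[p]] = i             # first sorted slot belonging to that oct
    # the particles of oct k are pind[doff[k]:doff[k] + pcount[k]]
    assert sorted(pind[doff[2]:doff[2] + pcount[2]]) == [0, 2, 5]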
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -40,6 +40,9 @@
oct_visitor_function *func,
OctVisitorData *data,
int visit_covered = ?)
+ cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+ np.float64_t spos[3], np.float64_t sdds[3],
+ oct_visitor_function *func, int i, int j, int k)
cdef int select_grid(self, np.float64_t left_edge[3],
np.float64_t right_edge[3],
np.int32_t level, Oct *o = ?) nogil
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -157,16 +157,13 @@
def count_octs(self, OctreeContainer octree, int domain_id = -1):
cdef OctVisitorData data
- data.index = 0
- data.last = -1
- data.domain = domain_id
+ octree.setup_data(&data, domain_id)
octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
return data.index
def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
cdef OctVisitorData data
- data.index = 0
- data.domain = domain_id
+ octree.setup_data(&data, domain_id)
octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
return data.index
@@ -230,6 +227,10 @@
if root.children != NULL:
ch = root.children[cind(i, j, k)]
if iter == 1 and next_level == 1 and ch != NULL:
+ # Note that data.pos is always going to be the
+ # position of the Oct -- it is *not* always going
+ # to be the same as the position of the cell under
+ # investigation.
data.pos[0] = (data.pos[0] << 1) + i
data.pos[1] = (data.pos[1] << 1) + j
data.pos[2] = (data.pos[2] << 1) + k
@@ -242,21 +243,60 @@
data.pos[2] = (data.pos[2] >> 1)
data.level -= 1
elif this_level == 1:
- selected = self.select_cell(spos, sdds)
- if ch != NULL:
- selected *= self.overlap_cells
data.global_index += increment
increment = 0
- data.ind[0] = i
- data.ind[1] = j
- data.ind[2] = k
- func(root, data, selected)
+ self.visit_oct_cells(data, root, ch, spos, sdds,
+ func, i, j, k)
spos[2] += sdds[2]
spos[1] += sdds[1]
spos[0] += sdds[0]
this_level = 0 # We turn this off for the second pass.
iter += 1
+ cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+ np.float64_t spos[3], np.float64_t sdds[3],
+ oct_visitor_function *func, int i, int j, int k):
+ # We can short-circuit the whole process if data.oref == 1.
+ # This saves us some funny-business.
+ cdef int selected
+ if data.oref == 1:
+ selected = self.select_cell(spos, sdds)
+ if ch != NULL:
+ selected *= self.overlap_cells
+ # data.ind refers to the cell, not to the oct.
+ data.ind[0] = i
+ data.ind[1] = j
+ data.ind[2] = k
+ func(root, data, selected)
+ return
+ # Okay, now that we've got that out of the way, we have to do some
+ # other checks here. In this case, spos[] is the position of the
+ # center of a *possible* oct child, which means it is the center of a
+ # cluster of cells. That cluster might have 1, 8, 64, ... cells in it.
+ # But, we can figure it out by calculating the cell dds.
+ cdef np.float64_t dds[3], pos[3]
+ cdef int ci, cj, ck
+ cdef int nr = (1 << (data.oref - 1))
+ for ci in range(3):
+ dds[ci] = sdds[ci] / nr
+ # Bootstrap the position at the first cell index.
+ pos[0] = (spos[0] - sdds[0]/2.0) + dds[0] * 0.5
+ for ci in range(nr):
+ pos[1] = (spos[1] - sdds[1]/2.0) + dds[1] * 0.5
+ for cj in range(nr):
+ pos[2] = (spos[2] - sdds[2]/2.0) + dds[2] * 0.5
+ for ck in range(nr):
+ selected = self.select_cell(pos, dds)
+ if ch != NULL:
+ selected *= self.overlap_cells
+ data.ind[0] = ci + i * nr
+ data.ind[1] = cj + j * nr
+ data.ind[2] = ck + k * nr
+ func(root, data, selected)
+ pos[2] += dds[2]
+ pos[1] += dds[1]
+ pos[0] += dds[0]
+
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
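
The non-short-circuit branch of visit_oct_cells simply enumerates the cells packed into one oct child: with an over-refinement factor data.oref, each child spans nr = 2**(oref - 1) cells per axis, the cell width is sdds / nr, and the cell index inside the oct is offset by i * nr (and likewise for j, k). A small pure-Python sketch of that enumeration (names are illustrative, not part of the changeset):

import itertools
import numpy as np

def enumerate_child_cells(spos, sdds, i, j, k, oref):
    # For the oct child at (i, j, k), centered at spos with width sdds,
    # an over-refinement factor oref means nr = 2**(oref - 1) cells per
    # axis: cell width dds = sdds / nr, and each cell's index inside the
    # oct is (ci + i*nr, cj + j*nr, ck + k*nr).
    spos = np.asarray(spos, dtype="float64")
    sdds = np.asarray(sdds, dtype="float64")
    nr = 1 << (oref - 1)
    dds = sdds / nr
    cells = []
    for ci, cj, ck in itertools.product(range(nr), repeat=3):
        pos = (spos - 0.5 * sdds) + (np.array([ci, cj, ck]) + 0.5) * dds
        cells.append(((ci + i * nr, cj + j * nr, ck + k * nr), pos))
    return cells

For oref == 1 this reduces to a single cell centered at spos with index (i, j, k), which is exactly what the short-circuit branch at the top of visit_oct_cells does.
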
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -43,6 +43,15 @@
"yt/geometry/oct_container.pxd",
"yt/geometry/selection_routines.pxd",
"yt/geometry/particle_deposit.pxd"])
+ config.add_extension("particle_smooth",
+ ["yt/geometry/particle_smooth.pyx"],
+ include_dirs=["yt/utilities/lib/"],
+ libraries=["m"],
+ depends=["yt/utilities/lib/fp_utils.pxd",
+ "yt/geometry/oct_container.pxd",
+ "yt/geometry/selection_routines.pxd",
+ "yt/geometry/particle_deposit.pxd",
+ "yt/geometry/particle_smooth.pxd"])
config.add_extension("fake_octree",
["yt/geometry/fake_octree.pyx"],
include_dirs=["yt/utilities/lib/"],
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -59,6 +59,35 @@
v = np.bincount(bi.astype("int64"))
yield assert_equal, v.max() <= n_ref, True
+def test_particle_overrefine():
+ np.random.seed(int(0x4d3d3d3))
+ pos = []
+ data = {}
+ bbox = []
+ for i, ax in enumerate('xyz'):
+ DW = DRE[i] - DLE[i]
+ LE = DLE[i]
+ data["particle_position_%s" % ax] = \
+ np.random.normal(0.5, scale=0.05, size=(NPART)) * DW + LE
+ bbox.append( [DLE[i], DRE[i]] )
+ bbox = np.array(bbox)
+ _attrs = ('icoords', 'fcoords', 'fwidth', 'ires')
+ for n_ref in [16, 32, 64, 512, 1024]:
+ pf1 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
+ dd1 = pf1.h.all_data()
+ v1 = dict((a, getattr(dd1, a)) for a in _attrs)
+ cv1 = dd1["CellVolumeCode"].sum(dtype="float64")
+ for over_refine in [1, 2, 3]:
+ f = 1 << (3*(over_refine-1))
+ pf2 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref,
+ over_refine_factor = over_refine)
+ dd2 = pf2.h.all_data()
+ v2 = dict((a, getattr(dd2, a)) for a in _attrs)
+ for a in sorted(v1):
+ yield assert_equal, v1[a].size * f, v2[a].size
+ cv2 = dd2["CellVolumeCode"].sum(dtype="float64")
+ yield assert_equal, cv1, cv2
+
if __name__=="__main__":
for i in test_add_particles_random():
i[0](*i[1:])
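
The size check in test_particle_overrefine relies on simple bookkeeping: an oct built with over_refine_factor r holds (2**r)**3 cells, so relative to the default r = 1 the cell count grows by f = 2**(3*(r - 1)) while each cell's volume shrinks by the same factor, leaving the summed CellVolumeCode unchanged. A quick sanity check of that factor (illustrative only):

# Cell-count bookkeeping used by the test (illustrative check only):
# an oct built with over_refine_factor r holds (2**r)**3 cells, so the
# count grows by f = 2**(3*(r - 1)) relative to r = 1, while each cell's
# volume shrinks by 1/f -- hence the v1[a].size * f and cv1 == cv2 checks.
for r in (1, 2, 3):
    cells_per_oct = (2 ** r) ** 3
    f = 1 << (3 * (r - 1))
    assert cells_per_oct == 8 * f
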
Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.